/*
 * Block data types and constants. Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by
	 * bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
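
/*
 * Illustrative sketch, not part of the original header: the segments of
 * a bio are normally walked through bi_iter with the
 * bio_for_each_segment() helper from <linux/bio.h>; process_segment()
 * below is a hypothetical stand-in for per-segment driver work:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		process_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */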

#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {			\
	if (__builtin_constant_p(op))					\
		BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
	else								\
		WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
	if (__builtin_constant_p(op_flags))				\
		BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	else								\
		WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
	(bio)->bi_opf = bio_flags(bio);					\
	(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
	(bio)->bi_opf |= (op_flags);					\
} while (0)
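
/*
 * Usage example (illustrative only): set up a sync write with FUA and
 * read the fields back through the accessors. The op occupies the top
 * REQ_OP_BITS of bi_opf and the flags the remaining low bits, which is
 * why bio_set_op_attrs() first strips the old op via bio_flags():
 *
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);
 *
 *	bio_op(bio) == REQ_OP_WRITE
 *	(bio_flags(bio) & REQ_FUA) != 0
 */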

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
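
/*
 * Sketch of how BIO_RESET_BYTES is used, based on bio_reset() in
 * block/bio.c: everything before bi_max_vecs is zeroed on reset and
 * everything from bi_max_vecs onwards survives:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */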

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->__bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->__bi_cnt */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10
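
/*
 * Illustrative: the values above are bit numbers in bi_flags, tested
 * with helpers such as bio_flagged() from <linux/bio.h>, e.g. to skip
 * bios whose vec list is not ours to modify:
 *
 *	if (bio_flagged(bio, BIO_CLONED))
 *		return;
 */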

/*
 * We support 6 different bvec pools; the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
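
/*
 * Encoding example (an assumed sketch, modelled on the allocation path
 * in block/bio.c): bvecs taken from pool 2 are recorded as 2 + 1 = 3 in
 * the top bits of bi_flags, so BVEC_POOL_IDX() returns 3 and a result
 * of 0 keeps meaning "no bvecs to free":
 *
 *	bio->bi_flags |= (pool_idx + 1) << BVEC_POOL_OFFSET;
 */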

#endif /* CONFIG_BLOCK */

/*
 * Request flags. For use in the cmd_flags field of struct request, and in
 * bi_opf of struct bio. Note that some flags are valid only in one or the
 * other.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata I/O request */
	__REQ_PRIO,		/* boost priority in cfq */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | \
	 REQ_FUA | REQ_FLUSH_SEQ)

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* request to discard sectors */
	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
	REQ_OP_WRITE_SAME,	/* write same block many times */
	REQ_OP_FLUSH,		/* request for cache flush */
};
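
/*
 * Illustrative dispatch, not from the original header: drivers
 * typically branch on the operation via bio_op() (or req_op() for
 * requests); do_rw() and do_discard() are hypothetical helpers:
 *
 *	switch (bio_op(bio)) {
 *	case REQ_OP_READ:
 *	case REQ_OP_WRITE:
 *		return do_rw(bio);
 *	case REQ_OP_DISCARD:
 *		return do_discard(bio);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */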

#define REQ_OP_BITS 3

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
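
/*
 * Illustrative round trip, not part of the original header: a cookie
 * packs the hardware queue number above BLK_QC_T_SHIFT and the tag in
 * the low 16 bits:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);
 *
 *	blk_qc_t_to_queue_num(cookie) == 3
 *	blk_qc_t_to_tag(cookie) == 42
 *
 * BLK_QC_T_NONE (-1U) is the "nothing to poll" sentinel and is the one
 * value for which blk_qc_t_valid() returns false.
 */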

#endif /* __LINUX_BLK_TYPES_H */