#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;

	ktime_t lat_hist_io_start;
	int lat_hist_enabled;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)

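/*
 * A minimal usage sketch (not taken from an in-tree caller): the operation
 * occupies the top REQ_OP_BITS of cmd_flags and the attribute flags sit in
 * the bits below it.  A caller preparing a synchronous write on a request
 * it owns would do something like:
 *
 *	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
 *	WARN_ON(req_op(rq) != REQ_OP_WRITE);
 *
 * REQ_OP_WRITE and REQ_SYNC come from linux/blk_types.h; only the wiring
 * shown here is illustrative.
 */
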
static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};
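
/*
 * A minimal sketch of how these limits are usually populated: drivers do
 * not assign q->limits fields directly but call the blk_queue_* helpers
 * declared later in this header, for example:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 1024);
 *	blk_queue_max_segments(q, 128);
 *
 * Stacking drivers (dm, md) combine the limits of their components via
 * blk_set_stacking_limits() and blk_stack_limits() instead.
 */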

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
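
/*
 * A minimal locking sketch for the helpers above: the plain
 * queue_flag_set()/queue_flag_clear() variants assert that q->queue_lock
 * is held, while the *_unlocked variants are only safe while nobody else
 * can see the queue (e.g. during allocation or teardown):
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 */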

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * A driver can handle struct request if it either has an old-style
 * request_fn defined or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};
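
/*
 * A minimal prep_rq_fn sketch (my_resources_available() is a hypothetical
 * driver helper) showing how the return values above are typically used;
 * the hook itself is installed with blk_queue_prep_rq(), declared below:
 *
 *	static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
 *	{
 *		if (rq->cmd_type != REQ_TYPE_FS)
 *			return BLKPREP_KILL;
 *		if (!my_resources_available(q))
 *			return BLKPREP_DEFER;
 *		return BLKPREP_OK;
 *	}
 */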

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page  **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
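
/*
 * A minimal iteration sketch: walking every segment of a request, as a
 * simple memory-backed driver might (do_something() stands in for the
 * driver's per-segment work, and highmem pages would need kmap()):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *		do_something(buf, bvec.bv_len);
 *	}
 */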

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

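/*
 * A minimal pass-through sketch (assuming the caller already has a
 * SCSI-capable queue and a filled-in cdb[]); this is roughly what the
 * SG_IO and bsg paths do with the helpers above.  Error handling
 * (IS_ERR() on allocation, rq->errors afterwards) is omitted:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	blk_rq_set_block_pc(rq);
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */
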
bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
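
/*
 * Worked example with illustrative numbers: if chunk_sectors is 256 and
 * the request starts at sector offset 300, at most 256 - (300 & 255) =
 * 212 sectors fit before the next chunk boundary would be crossed.
 */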

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
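
/*
 * A minimal single-queue dispatch sketch (handle_blocks() is a
 * hypothetical driver helper that can complete the work inline):
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			handle_blocks(blk_rq_pos(rq), blk_rq_bytes(rq));
 *			__blk_end_request_all(rq, 0);
 *		}
 *	}
 *
 * blk_fetch_request() is blk_peek_request() plus blk_start_request();
 * the __blk_end_request_*() variants are used because request_fn runs
 * with the queue lock held.
 */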

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
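
/*
 * A minimal queue-setup sketch for a bio-based driver (my_make_request()
 * is a hypothetical make_request_fn); a request-based driver would use
 * blk_init_queue() with its request_fn and spinlock instead:
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, my_make_request);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 1024);
 */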
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032
Jens Axboe165125e2007-07-24 09:28:11 +02001033extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034extern void blk_dump_rq_flags(struct request *, char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035extern long nr_blockdev_pages(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
Tejun Heo09ac46c2011-12-14 00:33:38 +01001037bool __must_check blk_get_queue(struct request_queue *);
Jens Axboe165125e2007-07-24 09:28:11 +02001038struct request_queue *blk_alloc_queue(gfp_t);
1039struct request_queue *blk_alloc_queue_node(gfp_t, int);
1040extern void blk_put_queue(struct request_queue *);
Jens Axboe3f21c262015-06-05 10:57:37 -06001041extern void blk_set_queue_dying(struct request_queue *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042
Shaohua Li316cc672011-07-08 08:19:21 +02001043/*
Lin Ming6c954662013-03-23 11:42:26 +08001044 * Block layer runtime PM functions
1045 */
Rafael J. Wysocki47fafbc2014-12-04 01:00:23 +01001046#ifdef CONFIG_PM
Lin Ming6c954662013-03-23 11:42:26 +08001047extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1048extern int blk_pre_runtime_suspend(struct request_queue *q);
1049extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1050extern void blk_pre_runtime_resume(struct request_queue *q);
1051extern void blk_post_runtime_resume(struct request_queue *q, int err);
Mika Westerbergd07ab6d2016-02-18 10:54:11 +02001052extern void blk_set_runtime_active(struct request_queue *q);
Lin Ming6c954662013-03-23 11:42:26 +08001053#else
1054static inline void blk_pm_runtime_init(struct request_queue *q,
1055 struct device *dev) {}
1056static inline int blk_pre_runtime_suspend(struct request_queue *q)
1057{
1058 return -ENOSYS;
1059}
1060static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1061static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1062static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
Tobias Klauserf99694c2016-11-18 15:16:06 +01001063static inline void blk_set_runtime_active(struct request_queue *q) {}
Lin Ming6c954662013-03-23 11:42:26 +08001064#endif
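/*
 * Illustrative sketch (not part of this header): a driver's runtime-suspend
 * callback would typically bracket its hardware suspend with the helpers
 * above, after blk_pm_runtime_init() has tied the queue to the device at
 * probe time.  The foo_* names are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(foo->queue);
 *		if (err)
 *			return err;
 *		err = foo_suspend_hw(foo);
 *		blk_post_runtime_suspend(foo->queue, err);
 *		return err;
 *	}
 *
 * The resume side mirrors this with blk_pre_runtime_resume() and
 * blk_post_runtime_resume().
 */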
1065
1066/*
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001067 * blk_plug permits building a queue of related requests by holding the I/O
1068 * fragments for a short period. This allows sequential requests to be merged
1069 * into a single larger request. Because the requests are moved from a per-task
1070 * list to the device's request_queue in a batch, scalability improves:
1071 * contention on the request_queue lock is reduced.
1072 *
1073 * It is OK not to disable preemption when adding the request to the plug list
1074 * or when attempting a merge, because blk_schedule_flush_plug() only flushes
1075 * the plug list when the task sleeps by itself. For details, please see
1076 * schedule(), where blk_schedule_flush_plug() is called.
Shaohua Li316cc672011-07-08 08:19:21 +02001077 */
Jens Axboe73c10102011-03-08 13:19:51 +01001078struct blk_plug {
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001079 struct list_head list; /* requests */
Jens Axboe320ae512013-10-24 09:20:05 +01001080 struct list_head mq_list; /* blk-mq requests */
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001081 struct list_head cb_list; /* md requires an unplug callback */
Jens Axboe73c10102011-03-08 13:19:51 +01001082};
Shaohua Li55c022b2011-07-08 08:19:20 +02001083#define BLK_MAX_REQUEST_COUNT 16
1084
NeilBrown9cbb1752012-07-31 09:08:14 +02001085struct blk_plug_cb;
NeilBrown74018dc2012-07-31 09:08:15 +02001086typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
NeilBrown048c9372011-04-18 09:52:22 +02001087struct blk_plug_cb {
1088 struct list_head list;
NeilBrown9cbb1752012-07-31 09:08:14 +02001089 blk_plug_cb_fn callback;
1090 void *data;
NeilBrown048c9372011-04-18 09:52:22 +02001091};
NeilBrown9cbb1752012-07-31 09:08:14 +02001092extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1093 void *data, int size);
Jens Axboe73c10102011-03-08 13:19:51 +01001094extern void blk_start_plug(struct blk_plug *);
1095extern void blk_finish_plug(struct blk_plug *);
Jens Axboef6603782011-04-15 15:49:07 +02001096extern void blk_flush_plug_list(struct blk_plug *, bool);
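/*
 * Illustrative sketch (not part of this header): typical on-stack plugging
 * around a batch of submissions.  has_more_work() and submit_one_bio() are
 * hypothetical placeholders for the caller's own loop.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (has_more_work())
 *		submit_one_bio();
 *	blk_finish_plug(&plug);
 *
 * Requests submitted between start and finish sit on the per-task plug
 * lists and are handed to the driver in one batch, or earlier if the task
 * goes to sleep (see blk_schedule_flush_plug() below).
 */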
Jens Axboe73c10102011-03-08 13:19:51 +01001097
1098static inline void blk_flush_plug(struct task_struct *tsk)
1099{
1100 struct blk_plug *plug = tsk->plug;
1101
Christoph Hellwig88b996c2011-04-15 15:20:10 +02001102 if (plug)
Jens Axboea237c1c2011-04-16 13:27:55 +02001103 blk_flush_plug_list(plug, false);
1104}
1105
1106static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1107{
1108 struct blk_plug *plug = tsk->plug;
1109
1110 if (plug)
Jens Axboef6603782011-04-15 15:49:07 +02001111 blk_flush_plug_list(plug, true);
Jens Axboe73c10102011-03-08 13:19:51 +01001112}
1113
1114static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1115{
1116 struct blk_plug *plug = tsk->plug;
1117
Jens Axboe320ae512013-10-24 09:20:05 +01001118 return plug &&
1119 (!list_empty(&plug->list) ||
1120 !list_empty(&plug->mq_list) ||
1121 !list_empty(&plug->cb_list));
Jens Axboe73c10102011-03-08 13:19:51 +01001122}
1123
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124/*
1125 * Tagged command queueing helpers (legacy request path, struct blk_queue_tag)
1126 */
Jens Axboe165125e2007-07-24 09:28:11 +02001127extern int blk_queue_start_tag(struct request_queue *, struct request *);
1128extern struct request *blk_queue_find_tag(struct request_queue *, int);
1129extern void blk_queue_end_tag(struct request_queue *, struct request *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001130extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
Jens Axboe165125e2007-07-24 09:28:11 +02001131extern void blk_queue_free_tags(struct request_queue *);
1132extern int blk_queue_resize_tags(struct request_queue *, int);
1133extern void blk_queue_invalidate_tags(struct request_queue *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001134extern struct blk_queue_tag *blk_init_tags(int, int);
James Bottomley492dfb42006-08-30 15:48:45 -04001135extern void blk_free_tags(struct blk_queue_tag *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
David C Somayajuluf583f492006-10-04 08:27:25 +02001137static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1138 int tag)
1139{
1140 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1141 return NULL;
1142 return bqt->tag_index[tag];
1143}
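/*
 * Illustrative sketch (not part of this header): how a legacy (non-blk-mq)
 * driver might use the tagging helpers above.  hw_queue_cmd() and the
 * completion path are hypothetical.
 *
 *	// submission: allocate a tag for this request
 *	if (blk_queue_start_tag(q, rq))
 *		return;			// out of tags, requeue and retry later
 *	hw_queue_cmd(hba, rq->tag, rq);	// rq->tag identifies the command
 *
 *	// completion interrupt: map the hardware tag back to the request
 *	rq = blk_queue_find_tag(q, tag);
 *	if (rq)
 *		blk_queue_end_tag(q, rq);
 */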
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001144
Christoph Hellwige950fdf2016-07-19 11:23:33 +02001145
1146#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
1147#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001148
1149extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
Dmitry Monakhovfbd9b092010-04-28 17:55:06 +04001150extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1151 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
Christoph Hellwig38f25252016-04-16 14:55:28 -04001152extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
Christoph Hellwig288dab82016-06-09 16:00:36 +02001153 sector_t nr_sects, gfp_t gfp_mask, int flags,
Mike Christie469e3212016-06-05 14:31:49 -05001154 struct bio **biop);
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001155extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1156 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
Dmitry Monakhov3f14d792010-04-28 17:55:09 +04001157extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001158 sector_t nr_sects, gfp_t gfp_mask, bool discard);
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001159static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1160 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
David Woodhousefb2dce82008-08-05 18:01:53 +01001161{
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001162 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1163 nr_blocks << (sb->s_blocksize_bits - 9),
1164 gfp_mask, flags);
David Woodhousefb2dce82008-08-05 18:01:53 +01001165}
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001166static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
Theodore Ts'oa107e5a2010-10-27 23:44:47 -04001167 sector_t nr_blocks, gfp_t gfp_mask)
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001168{
1169 return blkdev_issue_zeroout(sb->s_bdev,
1170 block << (sb->s_blocksize_bits - 9),
1171 nr_blocks << (sb->s_blocksize_bits - 9),
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001172 gfp_mask, true);
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001173}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
Jens Axboe018e0442009-06-26 16:27:10 +02001175extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
Adel Gadllah0b07de82008-06-26 13:48:27 +02001176
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001177enum blk_default_limits {
1178 BLK_MAX_SEGMENTS = 128,
1179 BLK_SAFE_MAX_SECTORS = 255,
Jeff Moyerd2be5372015-08-13 14:57:57 -04001180 BLK_DEF_MAX_SECTORS = 2560,
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001181 BLK_MAX_SEGMENT_SIZE = 65536,
1182 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1183};
Milan Broz0e435ac2008-12-03 12:55:08 +01001184
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1186
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001187static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1188{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001189 return q->limits.bounce_pfn;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001190}
1191
1192static inline unsigned long queue_segment_boundary(struct request_queue *q)
1193{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001194 return q->limits.seg_boundary_mask;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001195}
1196
Keith Busch03100aa2015-08-19 14:24:05 -07001197static inline unsigned long queue_virt_boundary(struct request_queue *q)
1198{
1199 return q->limits.virt_boundary_mask;
1200}
1201
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001202static inline unsigned int queue_max_sectors(struct request_queue *q)
1203{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001204 return q->limits.max_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001205}
1206
1207static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1208{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001209 return q->limits.max_hw_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001210}
1211
Martin K. Petersen8a783622010-02-26 00:20:39 -05001212static inline unsigned short queue_max_segments(struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001213{
Martin K. Petersen8a783622010-02-26 00:20:39 -05001214 return q->limits.max_segments;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001215}
1216
1217static inline unsigned int queue_max_segment_size(struct request_queue *q)
1218{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001219 return q->limits.max_segment_size;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001220}
1221
Martin K. Petersene1defc42009-05-22 17:17:49 -04001222static inline unsigned short queue_logical_block_size(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223{
1224 int retval = 512;
1225
Martin K. Petersen025146e2009-05-22 17:17:51 -04001226 if (q && q->limits.logical_block_size)
1227 retval = q->limits.logical_block_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
1229 return retval;
1230}
1231
Martin K. Petersene1defc42009-05-22 17:17:49 -04001232static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Martin K. Petersene1defc42009-05-22 17:17:49 -04001234 return queue_logical_block_size(bdev_get_queue(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235}
1236
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001237static inline unsigned int queue_physical_block_size(struct request_queue *q)
1238{
1239 return q->limits.physical_block_size;
1240}
1241
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001242static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
Martin K. Petersenac481c22009-10-03 20:52:01 +02001243{
1244 return queue_physical_block_size(bdev_get_queue(bdev));
1245}
1246
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001247static inline unsigned int queue_io_min(struct request_queue *q)
1248{
1249 return q->limits.io_min;
1250}
1251
Martin K. Petersenac481c22009-10-03 20:52:01 +02001252static inline int bdev_io_min(struct block_device *bdev)
1253{
1254 return queue_io_min(bdev_get_queue(bdev));
1255}
1256
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001257static inline unsigned int queue_io_opt(struct request_queue *q)
1258{
1259 return q->limits.io_opt;
1260}
1261
Martin K. Petersenac481c22009-10-03 20:52:01 +02001262static inline int bdev_io_opt(struct block_device *bdev)
1263{
1264 return queue_io_opt(bdev_get_queue(bdev));
1265}
1266
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001267static inline int queue_alignment_offset(struct request_queue *q)
1268{
Martin K. Petersenac481c22009-10-03 20:52:01 +02001269 if (q->limits.misaligned)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001270 return -1;
1271
Martin K. Petersenac481c22009-10-03 20:52:01 +02001272 return q->limits.alignment_offset;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001273}
1274
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001275static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001276{
1277 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
Mike Snitzerb8839b82014-10-08 18:26:13 -04001278 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001279
Mike Snitzerb8839b82014-10-08 18:26:13 -04001280 return (granularity + lim->alignment_offset - alignment) % granularity;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001281}
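/*
 * Worked example (illustrative numbers): with physical_block_size = 4096,
 * io_min = 0 and alignment_offset = 3584, granularity is 4096 bytes
 * (8 sectors).  For sector = 7, sector_div() leaves a remainder of
 * 7 sectors = 3584 bytes, so the result is
 * (4096 + 3584 - 3584) % 4096 = 0 and the sector is properly aligned;
 * sector = 0 would instead yield (4096 + 3584 - 0) % 4096 = 3584.
 */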
1282
Martin K. Petersenac481c22009-10-03 20:52:01 +02001283static inline int bdev_alignment_offset(struct block_device *bdev)
1284{
1285 struct request_queue *q = bdev_get_queue(bdev);
1286
1287 if (q->limits.misaligned)
1288 return -1;
1289
1290 if (bdev != bdev->bd_contains)
1291 return bdev->bd_part->alignment_offset;
1292
1293 return q->limits.alignment_offset;
1294}
1295
Martin K. Petersen86b37282009-11-10 11:50:21 +01001296static inline int queue_discard_alignment(struct request_queue *q)
1297{
1298 if (q->limits.discard_misaligned)
1299 return -1;
1300
1301 return q->limits.discard_alignment;
1302}
1303
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001304static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
Martin K. Petersen86b37282009-11-10 11:50:21 +01001305{
Linus Torvalds59771072012-12-19 07:18:35 -08001306 unsigned int alignment, granularity, offset;
Martin K. Petersendd3d1452010-01-11 03:21:48 -05001307
Martin K. Petersena934a002011-05-18 10:37:35 +02001308 if (!lim->max_discard_sectors)
1309 return 0;
1310
Linus Torvalds59771072012-12-19 07:18:35 -08001311	/* discard_alignment and discard_granularity are in bytes; convert to sectors */
1312 alignment = lim->discard_alignment >> 9;
1313 granularity = lim->discard_granularity >> 9;
1314 if (!granularity)
1315 return 0;
1316
1317 /* Offset of the partition start in 'granularity' sectors */
1318 offset = sector_div(sector, granularity);
1319
1320	/* Sectors from 'sector' up to the next properly aligned discard boundary */
1321 offset = (granularity + alignment - offset) % granularity;
1322
1323	/* Convert the result back to bytes */
1324 return offset << 9;
Martin K. Petersen86b37282009-11-10 11:50:21 +01001325}
1326
Paolo Bonzinic6e66632012-08-02 09:48:50 +02001327static inline int bdev_discard_alignment(struct block_device *bdev)
1328{
1329 struct request_queue *q = bdev_get_queue(bdev);
1330
1331 if (bdev != bdev->bd_contains)
1332 return bdev->bd_part->discard_alignment;
1333
1334 return q->limits.discard_alignment;
1335}
1336
Martin K. Petersen98262f22009-12-03 09:24:48 +01001337static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1338{
Martin K. Petersena934a002011-05-18 10:37:35 +02001339 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
Martin K. Petersen98262f22009-12-03 09:24:48 +01001340 return 1;
1341
1342 return 0;
1343}
1344
1345static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1346{
1347 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1348}
1349
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001350static inline unsigned int bdev_write_same(struct block_device *bdev)
1351{
1352 struct request_queue *q = bdev_get_queue(bdev);
1353
1354 if (q)
1355 return q->limits.max_write_same_sectors;
1356
1357 return 0;
1358}
1359
Jens Axboe165125e2007-07-24 09:28:11 +02001360static inline int queue_dma_alignment(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361{
Pete Wyckoff482eb682008-01-01 10:23:02 -05001362 return q ? q->dma_alignment : 511;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363}
1364
Namhyung Kim14417792010-09-15 13:08:27 +02001365static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
FUJITA Tomonori87904072008-08-28 15:05:58 +09001366 unsigned int len)
1367{
1368 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
Namhyung Kim14417792010-09-15 13:08:27 +02001369 return !(addr & alignment) && !(len & alignment);
FUJITA Tomonori87904072008-08-28 15:05:58 +09001370}
1371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372/* assumes size > 256 */
1373static inline unsigned int blksize_bits(unsigned int size)
1374{
1375 unsigned int bits = 8;
1376 do {
1377 bits++;
1378 size >>= 1;
1379 } while (size > 256);
1380 return bits;
1381}
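/*
 * For example, blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, i.e. log2() of the block size for the
 * power-of-two sizes this helper is meant for.
 */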
1382
Adrian Bunk2befb9e2005-09-10 00:27:17 -07001383static inline unsigned int block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384{
1385 return bdev->bd_block_size;
1386}
1387
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001388static inline bool queue_flush_queueable(struct request_queue *q)
1389{
Jens Axboec888a8f2016-04-13 13:33:19 -06001390 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001391}
1392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393typedef struct {struct page *v;} Sector;
1394
1395unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1396
1397static inline void put_dev_sector(Sector p)
1398{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001399 put_page(p.v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400}
1401
Ming Leie0af2912016-02-26 23:40:51 +08001402static inline bool __bvec_gap_to_prev(struct request_queue *q,
1403 struct bio_vec *bprv, unsigned int offset)
1404{
1405 return offset ||
1406 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1407}
1408
Keith Busch03100aa2015-08-19 14:24:05 -07001409/*
1410 * Check if adding a bio_vec after bprv with offset would create a gap in
1411 * the SG list. Most drivers don't care about this, but some do.
1412 */
1413static inline bool bvec_gap_to_prev(struct request_queue *q,
1414 struct bio_vec *bprv, unsigned int offset)
1415{
1416 if (!queue_virt_boundary(q))
1417 return false;
Ming Leie0af2912016-02-26 23:40:51 +08001418 return __bvec_gap_to_prev(q, bprv, offset);
Keith Busch03100aa2015-08-19 14:24:05 -07001419}
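/*
 * Worked example (illustrative): with queue_virt_boundary() == 0xfff (a 4K
 * boundary), a previous bvec with bv_offset 0x600 and bv_len 0x200 ends at
 * 0x800; 0x800 & 0xfff != 0, so a following bvec is reported as creating a
 * gap even when it starts at offset 0.  If the previous bvec instead ends
 * exactly on the boundary (bv_offset + bv_len == 0x1000) and the next one
 * starts at offset 0, no gap is reported.
 */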
1420
Jens Axboe5e7c4272015-09-03 19:28:20 +03001421static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1422 struct bio *next)
1423{
Ming Lei25e71a92016-02-26 23:40:52 +08001424 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1425 struct bio_vec pb, nb;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001426
Ming Lei25e71a92016-02-26 23:40:52 +08001427 bio_get_last_bvec(prev, &pb);
1428 bio_get_first_bvec(next, &nb);
1429
1430 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1431 }
1432
1433 return false;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001434}
1435
1436static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1437{
1438 return bio_will_gap(req->q, req->biotail, bio);
1439}
1440
1441static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1442{
1443 return bio_will_gap(req->q, bio, req->bio);
1444}
1445
Jens Axboe59c3d452014-04-08 09:15:35 -06001446int kblockd_schedule_work(struct work_struct *work);
Jens Axboeee63cfa2016-08-24 15:52:48 -06001447int kblockd_schedule_work_on(int cpu, struct work_struct *work);
Jens Axboe59c3d452014-04-08 09:15:35 -06001448int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
Jens Axboe8ab14592014-04-08 09:17:40 -06001449int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Divyesh Shah91952912010-04-01 15:01:41 -07001451#ifdef CONFIG_BLK_CGROUP
Jens Axboe28f41972010-06-01 12:23:18 +02001452/*
1453 * This should not be using sched_clock(). A proper fix is in progress;
1454 * until it is in place, we need to disable preemption around sched_clock()
1455 * in this function and in set_io_start_time_ns().
1456 */
Divyesh Shah91952912010-04-01 15:01:41 -07001457static inline void set_start_time_ns(struct request *req)
1458{
Jens Axboe28f41972010-06-01 12:23:18 +02001459 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001460 req->start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001461 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001462}
1463
1464static inline void set_io_start_time_ns(struct request *req)
1465{
Jens Axboe28f41972010-06-01 12:23:18 +02001466 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001467 req->io_start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001468 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001469}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001470
1471static inline uint64_t rq_start_time_ns(struct request *req)
1472{
1473 return req->start_time_ns;
1474}
1475
1476static inline uint64_t rq_io_start_time_ns(struct request *req)
1477{
1478 return req->io_start_time_ns;
1479}
Divyesh Shah91952912010-04-01 15:01:41 -07001480#else
1481static inline void set_start_time_ns(struct request *req) {}
1482static inline void set_io_start_time_ns(struct request *req) {}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001483static inline uint64_t rq_start_time_ns(struct request *req)
1484{
1485 return 0;
1486}
1487static inline uint64_t rq_io_start_time_ns(struct request *req)
1488{
1489 return 0;
1490}
Divyesh Shah91952912010-04-01 15:01:41 -07001491#endif
1492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1494 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1495#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1496 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1497
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001498#if defined(CONFIG_BLK_DEV_INTEGRITY)
1499
Martin K. Petersen8288f492014-09-26 19:20:02 -04001500enum blk_integrity_flags {
1501 BLK_INTEGRITY_VERIFY = 1 << 0,
1502 BLK_INTEGRITY_GENERATE = 1 << 1,
Martin K. Petersen3aec2f42014-09-26 19:20:03 -04001503 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
Martin K. Petersenaae7df52014-09-26 19:20:05 -04001504 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
Martin K. Petersen8288f492014-09-26 19:20:02 -04001505};
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001506
Martin K. Petersen18593082014-09-26 19:20:01 -04001507struct blk_integrity_iter {
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001508 void *prot_buf;
1509 void *data_buf;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001510 sector_t seed;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001511 unsigned int data_size;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001512 unsigned short interval;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001513 const char *disk_name;
1514};
1515
Martin K. Petersen18593082014-09-26 19:20:01 -04001516typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001517
Martin K. Petersen0f8087e2015-10-21 13:19:33 -04001518struct blk_integrity_profile {
1519 integrity_processing_fn *generate_fn;
1520 integrity_processing_fn *verify_fn;
1521 const char *name;
1522};
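/*
 * Illustrative sketch (not part of this header): a minimal integrity
 * profile.  foo_generate()/foo_verify() are hypothetical; real profiles
 * (e.g. the T10 PI ones) live in block/t10-pi.c.
 *
 *	static int foo_generate(struct blk_integrity_iter *iter)
 *	{
 *		// produce one protection tuple in iter->prot_buf for every
 *		// iter->interval bytes of iter->data_buf (iter->data_size
 *		// bytes total), then return 0
 *		return 0;
 *	}
 *
 *	static const struct blk_integrity_profile foo_profile = {
 *		.name		= "FOO-CSUM",
 *		.generate_fn	= foo_generate,
 *		.verify_fn	= foo_verify,	// defined analogously
 *	};
 */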
1523
Martin K. Petersen25520d52015-10-21 13:19:49 -04001524extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001525extern void blk_integrity_unregister(struct gendisk *);
Martin K. Petersenad7fce92008-10-01 03:38:39 -04001526extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001527extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1528 struct scatterlist *);
1529extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001530extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1531 struct request *);
1532extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1533 struct bio *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001534
Martin K. Petersen25520d52015-10-21 13:19:49 -04001535static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1536{
Dan Williamsac6fc482015-10-21 13:20:18 -04001537 struct blk_integrity *bi = &disk->queue->integrity;
Martin K. Petersen25520d52015-10-21 13:19:49 -04001538
1539 if (!bi->profile)
1540 return NULL;
1541
1542 return bi;
1543}
1544
Jens Axboeb04accc2008-10-02 12:53:22 +02001545static inline
1546struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1547{
Martin K. Petersen25520d52015-10-21 13:19:49 -04001548 return blk_get_integrity(bdev->bd_disk);
Martin K. Petersenb02739b2008-10-02 18:47:49 +02001549}
1550
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001551static inline bool blk_integrity_rq(struct request *rq)
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001552{
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001553 return rq->cmd_flags & REQ_INTEGRITY;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001554}
1555
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001556static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1557 unsigned int segs)
1558{
1559 q->limits.max_integrity_segments = segs;
1560}
1561
1562static inline unsigned short
1563queue_max_integrity_segments(struct request_queue *q)
1564{
1565 return q->limits.max_integrity_segments;
1566}
1567
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001568static inline bool integrity_req_gap_back_merge(struct request *req,
1569 struct bio *next)
1570{
1571 struct bio_integrity_payload *bip = bio_integrity(req->bio);
1572 struct bio_integrity_payload *bip_next = bio_integrity(next);
1573
1574 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1575 bip_next->bip_vec[0].bv_offset);
1576}
1577
1578static inline bool integrity_req_gap_front_merge(struct request *req,
1579 struct bio *bio)
1580{
1581 struct bio_integrity_payload *bip = bio_integrity(bio);
1582 struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1583
1584 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1585 bip_next->bip_vec[0].bv_offset);
1586}
1587
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001588#else /* CONFIG_BLK_DEV_INTEGRITY */
1589
Stephen Rothwellfd832402012-01-12 09:17:30 +01001590struct bio;
1591struct block_device;
1592struct gendisk;
1593struct blk_integrity;
1594
1595static inline int blk_integrity_rq(struct request *rq)
1596{
1597 return 0;
1598}
1599static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1600 struct bio *b)
1601{
1602 return 0;
1603}
1604static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1605 struct bio *b,
1606 struct scatterlist *s)
1607{
1608 return 0;
1609}
1610static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1611{
Michele Curti61a04e52014-10-09 15:30:17 -07001612 return NULL;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001613}
1614static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1615{
1616 return NULL;
1617}
1618static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1619{
1620 return 0;
1621}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001622static inline void blk_integrity_register(struct gendisk *d,
Stephen Rothwellfd832402012-01-12 09:17:30 +01001623 struct blk_integrity *b)
1624{
Stephen Rothwellfd832402012-01-12 09:17:30 +01001625}
1626static inline void blk_integrity_unregister(struct gendisk *d)
1627{
1628}
1629static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1630 unsigned int segs)
1631{
1632}
1633static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1634{
1635 return 0;
1636}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001637static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1638 struct request *r1,
1639 struct request *r2)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001640{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001641 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001642}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001643static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1644 struct request *r,
1645 struct bio *b)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001646{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001647 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001648}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001649
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001650static inline bool integrity_req_gap_back_merge(struct request *req,
1651 struct bio *next)
1652{
1653 return false;
1654}
1655static inline bool integrity_req_gap_front_merge(struct request *req,
1656 struct bio *bio)
1657{
1658 return false;
1659}
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001660
1661#endif /* CONFIG_BLK_DEV_INTEGRITY */
1662
Dan Williamsb2e0d162016-01-15 16:55:59 -08001663/**
1664 * struct blk_dax_ctl - control and output parameters for ->direct_access
1665 * @sector: (input) offset relative to a block_device
1666 * @addr: (output) kernel virtual address for @sector populated by driver
1667 * @pfn: (output) page frame number for @addr populated by driver
1668 * @size: (input) number of bytes requested
1669 */
1670struct blk_dax_ctl {
1671 sector_t sector;
Dan Williams7a9eb202016-06-03 18:06:47 -07001672 void *addr;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001673 long size;
Dan Williams34c0fd52016-01-15 16:56:14 -08001674 pfn_t pfn;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001675};
1676
Al Viro08f85852007-10-08 13:26:20 -04001677struct block_device_operations {
Al Virod4430d622008-03-02 09:09:22 -05001678 int (*open) (struct block_device *, fmode_t);
Al Virodb2a1442013-05-05 21:52:57 -04001679 void (*release) (struct gendisk *, fmode_t);
Jens Axboec11f0c02016-08-05 08:11:04 -06001680 int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
Al Virod4430d622008-03-02 09:09:22 -05001681 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1682 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
Dan Williams7a9eb202016-06-03 18:06:47 -07001683 long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
1684 long);
Tejun Heo77ea8872010-12-08 20:57:37 +01001685 unsigned int (*check_events) (struct gendisk *disk,
1686 unsigned int clearing);
1687 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
Al Viro08f85852007-10-08 13:26:20 -04001688 int (*media_changed) (struct gendisk *);
Tejun Heoc3e33e02010-05-15 20:09:29 +02001689 void (*unlock_native_capacity) (struct gendisk *);
Al Viro08f85852007-10-08 13:26:20 -04001690 int (*revalidate_disk) (struct gendisk *);
1691 int (*getgeo)(struct block_device *, struct hd_geometry *);
Nitin Guptab3a27d02010-05-17 11:02:43 +05301692	/* this callback is called with swap_lock held, and sometimes also with the page table lock */
1693 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
Al Viro08f85852007-10-08 13:26:20 -04001694 struct module *owner;
Christoph Hellwigbbd3e062015-10-15 14:10:48 +02001695 const struct pr_ops *pr_ops;
Al Viro08f85852007-10-08 13:26:20 -04001696};
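/*
 * Illustrative sketch (not part of this header): a simple driver fills in
 * only the operations it supports and leaves the rest NULL.  The foo_*
 * functions are hypothetical.
 *
 *	static const struct block_device_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.getgeo		= foo_getgeo,
 *	};
 *
 * The table is hooked up through the gendisk (disk->fops = &foo_fops)
 * before add_disk() is called.
 */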
1697
Al Viro633a08b2007-08-29 20:34:12 -04001698extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1699 unsigned long);
Matthew Wilcox47a191f2014-06-04 16:07:46 -07001700extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1701extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1702 struct writeback_control *);
Dan Williamsb2e0d162016-01-15 16:55:59 -08001703extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
Toshi Kani2d96afc2016-05-10 10:23:53 -06001704extern int bdev_dax_supported(struct super_block *, int);
Toshi Kania8078b12016-05-10 10:23:57 -06001705extern bool bdev_dax_capable(struct block_device *);
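/*
 * Illustrative sketch (not part of this header): mapping one 512-byte
 * sector of a DAX-capable block device; 'sector' and 'buf' are assumed to
 * be set up by the caller.
 *
 *	struct blk_dax_ctl dax = {
 *		.sector	= sector,
 *		.size	= 512,
 *	};
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail >= 512)
 *		memcpy(buf, dax.addr, 512);	// dax.addr/dax.pfn set by the driver
 *
 * On success the return value is the number of bytes accessible at
 * dax.addr; a negative value is an errno.
 */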
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001706
1707/*
1708 * X-axis bucket boundaries, in microseconds, for the I/O latency histogram.
1709 */
1710static const u_int64_t latency_x_axis_us[] = {
1711 100,
1712 200,
1713 300,
1714 400,
1715 500,
1716 600,
1717 700,
1718 800,
1719 900,
1720 1000,
1721 1200,
1722 1400,
1723 1600,
1724 1800,
1725 2000,
1726 2500,
1727 3000,
1728 4000,
1729 5000,
1730 6000,
1731 7000,
1732 9000,
1733 10000
1734};
1735
1736#define BLK_IO_LAT_HIST_DISABLE 0
1737#define BLK_IO_LAT_HIST_ENABLE 1
1738#define BLK_IO_LAT_HIST_ZERO 2
1739
1740struct io_latency_state {
Hyojun Kim11537d52017-12-21 09:57:41 -08001741 u_int64_t latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
1742 u_int64_t latency_elems;
1743 u_int64_t latency_sum;
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001744};
1745
1746static inline void
Hyojun Kim11537d52017-12-21 09:57:41 -08001747blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001748{
1749 int i;
1750
Hyojun Kim11537d52017-12-21 09:57:41 -08001751 for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
1752 if (delta_us < (u_int64_t)latency_x_axis_us[i])
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001753 break;
Hyojun Kim11537d52017-12-21 09:57:41 -08001754 s->latency_y_axis[i]++;
1755 s->latency_elems++;
1756 s->latency_sum += delta_us;
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001757}
1758
Hyojun Kim11537d52017-12-21 09:57:41 -08001759ssize_t blk_latency_hist_show(char* name, struct io_latency_state *s,
1760 char *buf, int buf_size);
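/*
 * Illustrative sketch (not part of this header): feeding the histogram from
 * a completion path and exporting it through sysfs.  foo->read_lat_state
 * and rq_start are hypothetical.
 *
 *	u_int64_t delta_us = ktime_us_delta(ktime_get(), rq_start);
 *
 *	blk_update_latency_hist(&foo->read_lat_state, delta_us);
 *
 * A sysfs show routine can then format the accumulated state with
 * blk_latency_hist_show("Read", &foo->read_lat_state, buf, PAGE_SIZE).
 */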
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001761
David Howells93614012006-09-30 20:45:40 +02001762#else /* CONFIG_BLOCK */
Fabian Frederickac13a822014-06-04 16:06:27 -07001763
1764struct block_device;
1765
David Howells93614012006-09-30 20:45:40 +02001766/*
1767 * stubs for when the block layer is configured out
1768 */
1769#define buffer_heads_over_limit 0
1770
David Howells93614012006-09-30 20:45:40 +02001771static inline long nr_blockdev_pages(void)
1772{
1773 return 0;
1774}
1775
Jens Axboe1f940bd2011-03-11 20:17:08 +01001776struct blk_plug {
1777};
1778
1779static inline void blk_start_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001780{
1781}
1782
Jens Axboe1f940bd2011-03-11 20:17:08 +01001783static inline void blk_finish_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001784{
1785}
1786
Jens Axboe1f940bd2011-03-11 20:17:08 +01001787static inline void blk_flush_plug(struct task_struct *task)
Jens Axboe73c10102011-03-08 13:19:51 +01001788{
1789}
1790
Jens Axboea237c1c2011-04-16 13:27:55 +02001791static inline void blk_schedule_flush_plug(struct task_struct *task)
1792{
1793}
1794
1795
Jens Axboe73c10102011-03-08 13:19:51 +01001796static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1797{
1798 return false;
1799}
1800
Fabian Frederickac13a822014-06-04 16:06:27 -07001801static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1802 sector_t *error_sector)
1803{
1804 return 0;
1805}
1806
David Howells93614012006-09-30 20:45:40 +02001807#endif /* CONFIG_BLOCK */
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809#endif