#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to allocate them dynamically. Flush requests are
	 * never put on the IO scheduler, so let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;

	ktime_t			lat_hist_io_start;
	int			lat_hist_enabled;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)

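/*
 * Illustrative sketch (not part of the original header): a driver building
 * its own request would typically stamp the operation and flags with
 * req_set_op_attrs() and later branch on req_op().  handle_write() below is
 * a hypothetical helper, shown only for the shape of the calls:
 *
 *	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
 *	...
 *	if (req_op(rq) == REQ_OP_WRITE)
 *		handle_write(rq);
 */
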
static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

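/*
 * Illustrative sketch (not part of the original header): a driver's
 * rq_timed_out_fn tells the block layer what to do with a request whose
 * timer has expired.  my_hw_still_busy() is a hypothetical helper:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (my_hw_still_busy(rq))
 *			return BLK_EH_RESET_TIMER;	// give the hardware more time
 *		return BLK_EH_NOT_HANDLED;		// let normal error handling run
 *	}
 */
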
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
#define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC	       23	/* Write back caching */
#define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
#define QUEUE_FLAG_DAX         26	/* device supports DAX */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

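/*
 * Illustrative sketch (not part of the original header): queue_flag_set()
 * and queue_flag_clear() expect queue_lock to be held (checked via
 * queue_lockdep_assert_held()), while the *_unlocked() variants do not:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 */
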
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

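/*
 * Illustrative sketch (not part of the original header): drivers commonly
 * branch on the transfer direction derived from the request's operation.
 * The setup_dma_*() helpers below are hypothetical:
 *
 *	if (rq_data_dir(rq) == WRITE)
 *		setup_dma_to_device(rq);
 *	else
 *		setup_dma_from_device(rq);
 */
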
/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

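/*
 * Illustrative sketch (not part of the original header): a prep_rq_fn is
 * called before a request is handed to the driver and returns one of the
 * BLKPREP_* values above.  my_resources_available() and my_build_cmd() are
 * hypothetical helpers:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resources_available(q))
 *			return BLKPREP_DEFER;	// leave on queue, retry later
 *		rq->special = my_build_cmd(rq);
 *		return BLKPREP_OK;
 *	}
 */
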
extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

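/*
 * Illustrative sketch (not part of the original header): walking every data
 * segment of a request with rq_for_each_segment(), e.g. to add up the total
 * transfer length:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec.bv_len;
 */
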
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_recalc_rq_segments(struct request *rq);
extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

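/*
 * Illustrative sketch (not part of the original header): the accessors above
 * are the supported way to read a request's position and size, since
 * __sector and __data_len are internal:
 *
 *	pr_debug("req at sector %llu, %u bytes (%u sectors) left\n",
 *		 (unsigned long long)blk_rq_pos(rq),
 *		 blk_rq_bytes(rq), blk_rq_sectors(rq));
 */
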
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return the maximum size of a request at a given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

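/*
 * Worked example (illustrative, not part of the original header): with
 * chunk_sectors = 256 and offset = 300, the request must not cross the
 * chunk boundary at sector 512, so blk_max_size_offset() returns
 * 256 - (300 & 255) = 256 - 44 = 212 sectors.
 */
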
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

Jens Axboe165125e2007-07-24 09:28:11 +02001034extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
AnilKumar Chimata7214d7e2017-06-23 03:09:59 -07001035extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
1036 struct scatterlist *sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037extern void blk_dump_rq_flags(struct request *, char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038extern long nr_blockdev_pages(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039
Tejun Heo09ac46c2011-12-14 00:33:38 +01001040bool __must_check blk_get_queue(struct request_queue *);
Jens Axboe165125e2007-07-24 09:28:11 +02001041struct request_queue *blk_alloc_queue(gfp_t);
1042struct request_queue *blk_alloc_queue_node(gfp_t, int);
1043extern void blk_put_queue(struct request_queue *);
Jens Axboe3f21c262015-06-05 10:57:37 -06001044extern void blk_set_queue_dying(struct request_queue *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045
Shaohua Li316cc672011-07-08 08:19:21 +02001046/*
Lin Ming6c954662013-03-23 11:42:26 +08001047 * Block layer runtime PM functions
1048 */
Rafael J. Wysocki47fafbc2014-12-04 01:00:23 +01001049#ifdef CONFIG_PM
Lin Ming6c954662013-03-23 11:42:26 +08001050extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1051extern int blk_pre_runtime_suspend(struct request_queue *q);
1052extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1053extern void blk_pre_runtime_resume(struct request_queue *q);
1054extern void blk_post_runtime_resume(struct request_queue *q, int err);
Mika Westerbergd07ab6d2016-02-18 10:54:11 +02001055extern void blk_set_runtime_active(struct request_queue *q);
Lin Ming6c954662013-03-23 11:42:26 +08001056#else
1057static inline void blk_pm_runtime_init(struct request_queue *q,
1058 struct device *dev) {}
1059static inline int blk_pre_runtime_suspend(struct request_queue *q)
1060{
1061 return -ENOSYS;
1062}
1063static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1064static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1065static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
Tobias Klauserf99694c2016-11-18 15:16:06 +01001066static inline void blk_set_runtime_active(struct request_queue *q) {}
Lin Ming6c954662013-03-23 11:42:26 +08001067#endif
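
/*
 * Illustrative sketch (assumption, not part of the upstream header): a driver
 * normally calls blk_pm_runtime_init() once at probe time and brackets its
 * runtime-PM callbacks with the pre/post helpers above. foo_device and
 * foo_hw_suspend() are hypothetical driver-side names.
 *
 *	blk_pm_runtime_init(foo->queue, dev);	(once, at probe time)
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		int err = blk_pre_runtime_suspend(foo->queue);
 *
 *		if (err)
 *			return err;
 *		err = foo_hw_suspend(foo);
 *		blk_post_runtime_suspend(foo->queue, err);
 *		return err;
 *	}
 *
 * Runtime resume mirrors this with blk_pre_runtime_resume() before waking the
 * hardware and blk_post_runtime_resume() afterwards.
 */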
1068
1069/*
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001070 * blk_plug permits building a queue of related requests by holding the I/O
 1071 * fragments for a short period. This allows sequential requests to be merged
 1072 * into a single, larger request. Because the requests are moved from a per-task
 1073 * list to the device's request_queue in a batch, scalability improves: contention
 1074 * on the request_queue lock is reduced. A usage sketch follows the declarations below.
 1075 *
 1076 * It is OK not to disable preemption when adding a request to the plug list
 1077 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
 1078 * the plug list when the task sleeps by itself. For details, please see
 1079 * schedule(), where blk_schedule_flush_plug() is called.
Shaohua Li316cc672011-07-08 08:19:21 +02001080 */
Jens Axboe73c10102011-03-08 13:19:51 +01001081struct blk_plug {
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001082 struct list_head list; /* requests */
Jens Axboe320ae512013-10-24 09:20:05 +01001083 struct list_head mq_list; /* blk-mq requests */
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001084 struct list_head cb_list; /* md requires an unplug callback */
Jens Axboe73c10102011-03-08 13:19:51 +01001085};
Shaohua Li55c022b2011-07-08 08:19:20 +02001086#define BLK_MAX_REQUEST_COUNT 16
1087
NeilBrown9cbb1752012-07-31 09:08:14 +02001088struct blk_plug_cb;
NeilBrown74018dc2012-07-31 09:08:15 +02001089typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
NeilBrown048c9372011-04-18 09:52:22 +02001090struct blk_plug_cb {
1091 struct list_head list;
NeilBrown9cbb1752012-07-31 09:08:14 +02001092 blk_plug_cb_fn callback;
1093 void *data;
NeilBrown048c9372011-04-18 09:52:22 +02001094};
NeilBrown9cbb1752012-07-31 09:08:14 +02001095extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1096 void *data, int size);
Jens Axboe73c10102011-03-08 13:19:51 +01001097extern void blk_start_plug(struct blk_plug *);
1098extern void blk_finish_plug(struct blk_plug *);
Jens Axboef6603782011-04-15 15:49:07 +02001099extern void blk_flush_plug_list(struct blk_plug *, bool);
Jens Axboe73c10102011-03-08 13:19:51 +01001100
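/*
 * Illustrative sketch (not part of the upstream header): a submitter batches
 * related I/O by keeping a plug on its stack around the submission loop.
 * bio_list[] and nr_bios are hypothetical caller variables.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bio_list[i]);
 *	blk_finish_plug(&plug);
 *
 * Requests queued while the plug is active sit on plug->list or plug->mq_list
 * and are handed to the driver either at blk_finish_plug() time or when the
 * task sleeps (see blk_schedule_flush_plug() below).
 */
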
1101static inline void blk_flush_plug(struct task_struct *tsk)
1102{
1103 struct blk_plug *plug = tsk->plug;
1104
Christoph Hellwig88b996c2011-04-15 15:20:10 +02001105 if (plug)
Jens Axboea237c1c2011-04-16 13:27:55 +02001106 blk_flush_plug_list(plug, false);
1107}
1108
1109static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1110{
1111 struct blk_plug *plug = tsk->plug;
1112
1113 if (plug)
Jens Axboef6603782011-04-15 15:49:07 +02001114 blk_flush_plug_list(plug, true);
Jens Axboe73c10102011-03-08 13:19:51 +01001115}
1116
1117static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1118{
1119 struct blk_plug *plug = tsk->plug;
1120
Jens Axboe320ae512013-10-24 09:20:05 +01001121 return plug &&
1122 (!list_empty(&plug->list) ||
1123 !list_empty(&plug->mq_list) ||
1124 !list_empty(&plug->cb_list));
Jens Axboe73c10102011-03-08 13:19:51 +01001125}
1126
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127/*
 1128 * Tag management for tagged command queueing (legacy request_fn drivers)
1129 */
Jens Axboe165125e2007-07-24 09:28:11 +02001130extern int blk_queue_start_tag(struct request_queue *, struct request *);
1131extern struct request *blk_queue_find_tag(struct request_queue *, int);
1132extern void blk_queue_end_tag(struct request_queue *, struct request *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001133extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
Jens Axboe165125e2007-07-24 09:28:11 +02001134extern void blk_queue_free_tags(struct request_queue *);
1135extern int blk_queue_resize_tags(struct request_queue *, int);
1136extern void blk_queue_invalidate_tags(struct request_queue *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001137extern struct blk_queue_tag *blk_init_tags(int, int);
James Bottomley492dfb42006-08-30 15:48:45 -04001138extern void blk_free_tags(struct blk_queue_tag *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
David C Somayajuluf583f492006-10-04 08:27:25 +02001140static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1141 int tag)
1142{
1143 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1144 return NULL;
1145 return bqt->tag_index[tag];
1146}
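
/*
 * Illustrative sketch (assumption): a legacy (request_fn based) driver sets up
 * its tag map once and then tags each request before issuing it to hardware.
 * depth is a hypothetical queue depth chosen by the driver.
 *
 *	err = blk_queue_init_tags(q, depth, NULL, BLK_TAG_ALLOC_FIFO);
 *	if (err)
 *		return err;
 *
 * Later, blk_queue_start_tag(q, rq) returns 0 and fills in rq->tag when a tag
 * could be assigned, and blk_queue_end_tag(q, rq) releases it on completion.
 */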
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001147
Christoph Hellwige950fdf2016-07-19 11:23:33 +02001148
1149#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
1150#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001151
1152extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
Dmitry Monakhovfbd9b092010-04-28 17:55:06 +04001153extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1154 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
Christoph Hellwig38f25252016-04-16 14:55:28 -04001155extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
Christoph Hellwig288dab82016-06-09 16:00:36 +02001156 sector_t nr_sects, gfp_t gfp_mask, int flags,
Mike Christie469e3212016-06-05 14:31:49 -05001157 struct bio **biop);
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001158extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1159 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
Dmitry Monakhov3f14d792010-04-28 17:55:09 +04001160extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001161 sector_t nr_sects, gfp_t gfp_mask, bool discard);
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001162static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1163 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
David Woodhousefb2dce82008-08-05 18:01:53 +01001164{
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001165 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1166 nr_blocks << (sb->s_blocksize_bits - 9),
1167 gfp_mask, flags);
David Woodhousefb2dce82008-08-05 18:01:53 +01001168}
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001169static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
Theodore Ts'oa107e5a2010-10-27 23:44:47 -04001170 sector_t nr_blocks, gfp_t gfp_mask)
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001171{
1172 return blkdev_issue_zeroout(sb->s_bdev,
1173 block << (sb->s_blocksize_bits - 9),
1174 nr_blocks << (sb->s_blocksize_bits - 9),
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001175 gfp_mask, true);
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001176}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
Jens Axboe018e0442009-06-26 16:27:10 +02001178extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
Adel Gadllah0b07de82008-06-26 13:48:27 +02001179
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001180enum blk_default_limits {
1181 BLK_MAX_SEGMENTS = 128,
1182 BLK_SAFE_MAX_SECTORS = 255,
Jeff Moyerd2be5372015-08-13 14:57:57 -04001183 BLK_DEF_MAX_SECTORS = 2560,
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001184 BLK_MAX_SEGMENT_SIZE = 65536,
1185 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1186};
Milan Broz0e435ac2008-12-03 12:55:08 +01001187
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1189
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001190static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1191{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001192 return q->limits.bounce_pfn;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001193}
1194
1195static inline unsigned long queue_segment_boundary(struct request_queue *q)
1196{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001197 return q->limits.seg_boundary_mask;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001198}
1199
Keith Busch03100aa2015-08-19 14:24:05 -07001200static inline unsigned long queue_virt_boundary(struct request_queue *q)
1201{
1202 return q->limits.virt_boundary_mask;
1203}
1204
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001205static inline unsigned int queue_max_sectors(struct request_queue *q)
1206{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001207 return q->limits.max_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001208}
1209
1210static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1211{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001212 return q->limits.max_hw_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001213}
1214
Martin K. Petersen8a783622010-02-26 00:20:39 -05001215static inline unsigned short queue_max_segments(struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001216{
Martin K. Petersen8a783622010-02-26 00:20:39 -05001217 return q->limits.max_segments;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001218}
1219
1220static inline unsigned int queue_max_segment_size(struct request_queue *q)
1221{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001222 return q->limits.max_segment_size;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001223}
1224
Martin K. Petersene1defc42009-05-22 17:17:49 -04001225static inline unsigned short queue_logical_block_size(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226{
1227 int retval = 512;
1228
Martin K. Petersen025146e2009-05-22 17:17:51 -04001229 if (q && q->limits.logical_block_size)
1230 retval = q->limits.logical_block_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231
1232 return retval;
1233}
1234
Martin K. Petersene1defc42009-05-22 17:17:49 -04001235static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236{
Martin K. Petersene1defc42009-05-22 17:17:49 -04001237 return queue_logical_block_size(bdev_get_queue(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238}
1239
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001240static inline unsigned int queue_physical_block_size(struct request_queue *q)
1241{
1242 return q->limits.physical_block_size;
1243}
1244
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001245static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
Martin K. Petersenac481c22009-10-03 20:52:01 +02001246{
1247 return queue_physical_block_size(bdev_get_queue(bdev));
1248}
1249
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001250static inline unsigned int queue_io_min(struct request_queue *q)
1251{
1252 return q->limits.io_min;
1253}
1254
Martin K. Petersenac481c22009-10-03 20:52:01 +02001255static inline int bdev_io_min(struct block_device *bdev)
1256{
1257 return queue_io_min(bdev_get_queue(bdev));
1258}
1259
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001260static inline unsigned int queue_io_opt(struct request_queue *q)
1261{
1262 return q->limits.io_opt;
1263}
1264
Martin K. Petersenac481c22009-10-03 20:52:01 +02001265static inline int bdev_io_opt(struct block_device *bdev)
1266{
1267 return queue_io_opt(bdev_get_queue(bdev));
1268}
1269
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001270static inline int queue_alignment_offset(struct request_queue *q)
1271{
Martin K. Petersenac481c22009-10-03 20:52:01 +02001272 if (q->limits.misaligned)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001273 return -1;
1274
Martin K. Petersenac481c22009-10-03 20:52:01 +02001275 return q->limits.alignment_offset;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001276}
1277
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001278static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001279{
1280 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
Mike Snitzerb8839b82014-10-08 18:26:13 -04001281 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001282
Mike Snitzerb8839b82014-10-08 18:26:13 -04001283 return (granularity + lim->alignment_offset - alignment) % granularity;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001284}
1285
Martin K. Petersenac481c22009-10-03 20:52:01 +02001286static inline int bdev_alignment_offset(struct block_device *bdev)
1287{
1288 struct request_queue *q = bdev_get_queue(bdev);
1289
1290 if (q->limits.misaligned)
1291 return -1;
1292
1293 if (bdev != bdev->bd_contains)
1294 return bdev->bd_part->alignment_offset;
1295
1296 return q->limits.alignment_offset;
1297}
1298
Martin K. Petersen86b37282009-11-10 11:50:21 +01001299static inline int queue_discard_alignment(struct request_queue *q)
1300{
1301 if (q->limits.discard_misaligned)
1302 return -1;
1303
1304 return q->limits.discard_alignment;
1305}
1306
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001307static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
Martin K. Petersen86b37282009-11-10 11:50:21 +01001308{
Linus Torvalds59771072012-12-19 07:18:35 -08001309 unsigned int alignment, granularity, offset;
Martin K. Petersendd3d1452010-01-11 03:21:48 -05001310
Martin K. Petersena934a002011-05-18 10:37:35 +02001311 if (!lim->max_discard_sectors)
1312 return 0;
1313
Linus Torvalds59771072012-12-19 07:18:35 -08001314 /* Why are these in bytes, not sectors? */
1315 alignment = lim->discard_alignment >> 9;
1316 granularity = lim->discard_granularity >> 9;
1317 if (!granularity)
1318 return 0;
1319
1320 /* Offset of the partition start in 'granularity' sectors */
1321 offset = sector_div(sector, granularity);
1322
1323 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1324 offset = (granularity + alignment - offset) % granularity;
1325
1326 /* Turn it back into bytes, gaah */
1327 return offset << 9;
Martin K. Petersen86b37282009-11-10 11:50:21 +01001328}
1329
Paolo Bonzinic6e66632012-08-02 09:48:50 +02001330static inline int bdev_discard_alignment(struct block_device *bdev)
1331{
1332 struct request_queue *q = bdev_get_queue(bdev);
1333
1334 if (bdev != bdev->bd_contains)
1335 return bdev->bd_part->discard_alignment;
1336
1337 return q->limits.discard_alignment;
1338}
1339
Martin K. Petersen98262f22009-12-03 09:24:48 +01001340static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1341{
Martin K. Petersena934a002011-05-18 10:37:35 +02001342 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
Martin K. Petersen98262f22009-12-03 09:24:48 +01001343 return 1;
1344
1345 return 0;
1346}
1347
1348static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1349{
1350 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1351}
1352
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001353static inline unsigned int bdev_write_same(struct block_device *bdev)
1354{
1355 struct request_queue *q = bdev_get_queue(bdev);
1356
1357 if (q)
1358 return q->limits.max_write_same_sectors;
1359
1360 return 0;
1361}
1362
Jens Axboe165125e2007-07-24 09:28:11 +02001363static inline int queue_dma_alignment(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364{
Pete Wyckoff482eb682008-01-01 10:23:02 -05001365 return q ? q->dma_alignment : 511;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366}
1367
Namhyung Kim14417792010-09-15 13:08:27 +02001368static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
FUJITA Tomonori87904072008-08-28 15:05:58 +09001369 unsigned int len)
1370{
1371 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
Namhyung Kim14417792010-09-15 13:08:27 +02001372 return !(addr & alignment) && !(len & alignment);
FUJITA Tomonori87904072008-08-28 15:05:58 +09001373}
1374
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375/* assumes size > 256 */
1376static inline unsigned int blksize_bits(unsigned int size)
1377{
1378 unsigned int bits = 8;
1379 do {
1380 bits++;
1381 size >>= 1;
1382 } while (size > 256);
1383 return bits;
1384}
1385
Adrian Bunk2befb9e2005-09-10 00:27:17 -07001386static inline unsigned int block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387{
1388 return bdev->bd_block_size;
1389}
1390
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001391static inline bool queue_flush_queueable(struct request_queue *q)
1392{
Jens Axboec888a8f2016-04-13 13:33:19 -06001393 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001394}
1395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396typedef struct {struct page *v;} Sector;
1397
1398unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1399
1400static inline void put_dev_sector(Sector p)
1401{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001402 put_page(p.v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403}
1404
Ming Leie0af2912016-02-26 23:40:51 +08001405static inline bool __bvec_gap_to_prev(struct request_queue *q,
1406 struct bio_vec *bprv, unsigned int offset)
1407{
1408 return offset ||
1409 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1410}
1411
Keith Busch03100aa2015-08-19 14:24:05 -07001412/*
1413 * Check if adding a bio_vec after bprv with offset would create a gap in
 1414 * the SG list. Most drivers don't care, but drivers that set a virt_boundary mask do; see the sketch below.
1415 */
1416static inline bool bvec_gap_to_prev(struct request_queue *q,
1417 struct bio_vec *bprv, unsigned int offset)
1418{
1419 if (!queue_virt_boundary(q))
1420 return false;
Ming Leie0af2912016-02-26 23:40:51 +08001421 return __bvec_gap_to_prev(q, bprv, offset);
Keith Busch03100aa2015-08-19 14:24:05 -07001422}
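
/*
 * Illustrative sketch (assumption): gap checking only matters on queues that
 * advertise a virtual boundary, e.g. hardware that cannot cross a page within
 * a single SG element:
 *
 *	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 *
 * With the mask set, bios whose vectors would leave such a gap are split or
 * refused for merging instead of producing an SG list the device cannot map.
 */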
1423
Jens Axboe5e7c4272015-09-03 19:28:20 +03001424static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1425 struct bio *next)
1426{
Ming Lei25e71a92016-02-26 23:40:52 +08001427 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1428 struct bio_vec pb, nb;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001429
Ming Lei25e71a92016-02-26 23:40:52 +08001430 bio_get_last_bvec(prev, &pb);
1431 bio_get_first_bvec(next, &nb);
1432
1433 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1434 }
1435
1436 return false;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001437}
1438
1439static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1440{
1441 return bio_will_gap(req->q, req->biotail, bio);
1442}
1443
1444static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1445{
1446 return bio_will_gap(req->q, bio, req->bio);
1447}
1448
Jens Axboe59c3d452014-04-08 09:15:35 -06001449int kblockd_schedule_work(struct work_struct *work);
Jens Axboeee63cfa2016-08-24 15:52:48 -06001450int kblockd_schedule_work_on(int cpu, struct work_struct *work);
Jens Axboe59c3d452014-04-08 09:15:35 -06001451int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
Jens Axboe8ab14592014-04-08 09:17:40 -06001452int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
Divyesh Shah91952912010-04-01 15:01:41 -07001454#ifdef CONFIG_BLK_CGROUP
Jens Axboe28f41972010-06-01 12:23:18 +02001455/*
1456 * This should not be using sched_clock(). A real patch is in progress
 1457 * to fix this up; until that is in place, we need to disable preemption
 1458 * around sched_clock() in this function and in set_io_start_time_ns().
1459 */
Divyesh Shah91952912010-04-01 15:01:41 -07001460static inline void set_start_time_ns(struct request *req)
1461{
Jens Axboe28f41972010-06-01 12:23:18 +02001462 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001463 req->start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001464 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001465}
1466
1467static inline void set_io_start_time_ns(struct request *req)
1468{
Jens Axboe28f41972010-06-01 12:23:18 +02001469 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001470 req->io_start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001471 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001472}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001473
1474static inline uint64_t rq_start_time_ns(struct request *req)
1475{
1476 return req->start_time_ns;
1477}
1478
1479static inline uint64_t rq_io_start_time_ns(struct request *req)
1480{
1481 return req->io_start_time_ns;
1482}
Divyesh Shah91952912010-04-01 15:01:41 -07001483#else
1484static inline void set_start_time_ns(struct request *req) {}
1485static inline void set_io_start_time_ns(struct request *req) {}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001486static inline uint64_t rq_start_time_ns(struct request *req)
1487{
1488 return 0;
1489}
1490static inline uint64_t rq_io_start_time_ns(struct request *req)
1491{
1492 return 0;
1493}
Divyesh Shah91952912010-04-01 15:01:41 -07001494#endif
1495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1497 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1498#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1499 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1500
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001501#if defined(CONFIG_BLK_DEV_INTEGRITY)
1502
Martin K. Petersen8288f492014-09-26 19:20:02 -04001503enum blk_integrity_flags {
1504 BLK_INTEGRITY_VERIFY = 1 << 0,
1505 BLK_INTEGRITY_GENERATE = 1 << 1,
Martin K. Petersen3aec2f42014-09-26 19:20:03 -04001506 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
Martin K. Petersenaae7df52014-09-26 19:20:05 -04001507 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
Martin K. Petersen8288f492014-09-26 19:20:02 -04001508};
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001509
Martin K. Petersen18593082014-09-26 19:20:01 -04001510struct blk_integrity_iter {
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001511 void *prot_buf;
1512 void *data_buf;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001513 sector_t seed;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001514 unsigned int data_size;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001515 unsigned short interval;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001516 const char *disk_name;
1517};
1518
Martin K. Petersen18593082014-09-26 19:20:01 -04001519typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001520
Martin K. Petersen0f8087e2015-10-21 13:19:33 -04001521struct blk_integrity_profile {
1522 integrity_processing_fn *generate_fn;
1523 integrity_processing_fn *verify_fn;
1524 const char *name;
1525};
1526
Martin K. Petersen25520d52015-10-21 13:19:49 -04001527extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001528extern void blk_integrity_unregister(struct gendisk *);
Martin K. Petersenad7fce92008-10-01 03:38:39 -04001529extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001530extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1531 struct scatterlist *);
1532extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001533extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1534 struct request *);
1535extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1536 struct bio *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001537
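/*
 * Illustrative sketch (assumption): a driver that supports protection
 * information describes its metadata format with a profile and registers it
 * against its gendisk. t10_pi_type1_crc is the Type 1 CRC profile exported by
 * block/t10-pi.c; q and disk are the driver's queue and gendisk.
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.interval_exp	= ilog2(queue_logical_block_size(q)),
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 *
 * blk_integrity_register() copies the template, so a stack-allocated struct
 * blk_integrity is fine here.
 */
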
Martin K. Petersen25520d52015-10-21 13:19:49 -04001538static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1539{
Dan Williamsac6fc482015-10-21 13:20:18 -04001540 struct blk_integrity *bi = &disk->queue->integrity;
Martin K. Petersen25520d52015-10-21 13:19:49 -04001541
1542 if (!bi->profile)
1543 return NULL;
1544
1545 return bi;
1546}
1547
Jens Axboeb04accc2008-10-02 12:53:22 +02001548static inline
1549struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1550{
Martin K. Petersen25520d52015-10-21 13:19:49 -04001551 return blk_get_integrity(bdev->bd_disk);
Martin K. Petersenb02739b2008-10-02 18:47:49 +02001552}
1553
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001554static inline bool blk_integrity_rq(struct request *rq)
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001555{
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001556 return rq->cmd_flags & REQ_INTEGRITY;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001557}
1558
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001559static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1560 unsigned int segs)
1561{
1562 q->limits.max_integrity_segments = segs;
1563}
1564
1565static inline unsigned short
1566queue_max_integrity_segments(struct request_queue *q)
1567{
1568 return q->limits.max_integrity_segments;
1569}
1570
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001571static inline bool integrity_req_gap_back_merge(struct request *req,
1572 struct bio *next)
1573{
1574 struct bio_integrity_payload *bip = bio_integrity(req->bio);
1575 struct bio_integrity_payload *bip_next = bio_integrity(next);
1576
1577 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1578 bip_next->bip_vec[0].bv_offset);
1579}
1580
1581static inline bool integrity_req_gap_front_merge(struct request *req,
1582 struct bio *bio)
1583{
1584 struct bio_integrity_payload *bip = bio_integrity(bio);
1585 struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1586
1587 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1588 bip_next->bip_vec[0].bv_offset);
1589}
1590
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001591#else /* CONFIG_BLK_DEV_INTEGRITY */
1592
Stephen Rothwellfd832402012-01-12 09:17:30 +01001593struct bio;
1594struct block_device;
1595struct gendisk;
1596struct blk_integrity;
1597
1598static inline int blk_integrity_rq(struct request *rq)
1599{
1600 return 0;
1601}
1602static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1603 struct bio *b)
1604{
1605 return 0;
1606}
1607static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1608 struct bio *b,
1609 struct scatterlist *s)
1610{
1611 return 0;
1612}
1613static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1614{
Michele Curti61a04e52014-10-09 15:30:17 -07001615 return NULL;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001616}
1617static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1618{
1619 return NULL;
1620}
1621static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1622{
1623 return 0;
1624}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001625static inline void blk_integrity_register(struct gendisk *d,
Stephen Rothwellfd832402012-01-12 09:17:30 +01001626 struct blk_integrity *b)
1627{
Stephen Rothwellfd832402012-01-12 09:17:30 +01001628}
1629static inline void blk_integrity_unregister(struct gendisk *d)
1630{
1631}
1632static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1633 unsigned int segs)
1634{
1635}
1636static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1637{
1638 return 0;
1639}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001640static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1641 struct request *r1,
1642 struct request *r2)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001643{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001644 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001645}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001646static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1647 struct request *r,
1648 struct bio *b)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001649{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001650 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001651}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001652
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001653static inline bool integrity_req_gap_back_merge(struct request *req,
1654 struct bio *next)
1655{
1656 return false;
1657}
1658static inline bool integrity_req_gap_front_merge(struct request *req,
1659 struct bio *bio)
1660{
1661 return false;
1662}
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001663
1664#endif /* CONFIG_BLK_DEV_INTEGRITY */
1665
Dan Williamsb2e0d162016-01-15 16:55:59 -08001666/**
1667 * struct blk_dax_ctl - control and output parameters for ->direct_access
1668 * @sector: (input) offset relative to a block_device
1669 * @addr: (output) kernel virtual address for @sector populated by driver
1670 * @pfn: (output) page frame number for @addr populated by driver
1671 * @size: (input) number of bytes requested
1672 */
1673struct blk_dax_ctl {
1674 sector_t sector;
Dan Williams7a9eb202016-06-03 18:06:47 -07001675 void *addr;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001676 long size;
Dan Williams34c0fd52016-01-15 16:56:14 -08001677 pfn_t pfn;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001678};
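
/*
 * Illustrative sketch (not part of the upstream header): a caller such as a
 * filesystem fills in the input fields and lets bdev_direct_access(), declared
 * below, ask the driver for the mapping. bdev, sector, buf and len are
 * hypothetical caller variables.
 *
 *	struct blk_dax_ctl dax = {
 *		.sector	= sector,
 *		.size	= PAGE_SIZE,
 *	};
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail < 0)
 *		return avail;
 *	memcpy(buf, dax.addr, min_t(long, avail, len));
 *
 * On success, avail is the number of bytes directly addressable at dax.addr.
 */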
1679
Al Viro08f85852007-10-08 13:26:20 -04001680struct block_device_operations {
Al Virod4430d622008-03-02 09:09:22 -05001681 int (*open) (struct block_device *, fmode_t);
Al Virodb2a1442013-05-05 21:52:57 -04001682 void (*release) (struct gendisk *, fmode_t);
Jens Axboec11f0c02016-08-05 08:11:04 -06001683 int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
Al Virod4430d622008-03-02 09:09:22 -05001684 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1685 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
Dan Williams7a9eb202016-06-03 18:06:47 -07001686 long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
1687 long);
Tejun Heo77ea8872010-12-08 20:57:37 +01001688 unsigned int (*check_events) (struct gendisk *disk,
1689 unsigned int clearing);
1690 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
Al Viro08f85852007-10-08 13:26:20 -04001691 int (*media_changed) (struct gendisk *);
Tejun Heoc3e33e02010-05-15 20:09:29 +02001692 void (*unlock_native_capacity) (struct gendisk *);
Al Viro08f85852007-10-08 13:26:20 -04001693 int (*revalidate_disk) (struct gendisk *);
1694 int (*getgeo)(struct block_device *, struct hd_geometry *);
Nitin Guptab3a27d02010-05-17 11:02:43 +05301695	/* this callback is invoked with swap_lock held, and sometimes also the page table lock */
1696 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
Al Viro08f85852007-10-08 13:26:20 -04001697 struct module *owner;
Christoph Hellwigbbd3e062015-10-15 14:10:48 +02001698 const struct pr_ops *pr_ops;
Al Viro08f85852007-10-08 13:26:20 -04001699};
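
/*
 * Illustrative sketch (assumption): a minimal driver fills in only the
 * callbacks it needs; foo_open(), foo_release() and foo_getgeo() are
 * hypothetical functions matching the prototypes above.
 *
 *	static const struct block_device_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.getgeo		= foo_getgeo,
 *	};
 *
 * The table is hooked up with disk->fops = &foo_fops before add_disk().
 */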
1700
Al Viro633a08b2007-08-29 20:34:12 -04001701extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1702 unsigned long);
Matthew Wilcox47a191f2014-06-04 16:07:46 -07001703extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1704extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1705 struct writeback_control *);
Dan Williamsb2e0d162016-01-15 16:55:59 -08001706extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
Toshi Kani2d96afc2016-05-10 10:23:53 -06001707extern int bdev_dax_supported(struct super_block *, int);
Toshi Kania8078b12016-05-10 10:23:57 -06001708extern bool bdev_dax_capable(struct block_device *);
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001709
1710/*
 1711 * X-axis bucket boundaries for IO latency histogram support, in microseconds.
1712 */
1713static const u_int64_t latency_x_axis_us[] = {
1714 100,
1715 200,
1716 300,
1717 400,
1718 500,
1719 600,
1720 700,
1721 800,
1722 900,
1723 1000,
1724 1200,
1725 1400,
1726 1600,
1727 1800,
1728 2000,
1729 2500,
1730 3000,
1731 4000,
1732 5000,
1733 6000,
1734 7000,
1735 9000,
1736 10000
1737};
1738
1739#define BLK_IO_LAT_HIST_DISABLE 0
1740#define BLK_IO_LAT_HIST_ENABLE 1
1741#define BLK_IO_LAT_HIST_ZERO 2
1742
1743struct io_latency_state {
1744 u_int64_t latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
1745 u_int64_t latency_reads_elems;
1746 u_int64_t latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
1747 u_int64_t latency_writes_elems;
1748};
1749
1750static inline void
1751blk_update_latency_hist(struct io_latency_state *s,
1752 int read,
1753 u_int64_t delta_us)
1754{
1755 int i;
1756
1757 for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
1758 if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
1759 if (read)
1760 s->latency_y_axis_read[i]++;
1761 else
1762 s->latency_y_axis_write[i]++;
1763 break;
1764 }
1765 }
1766 if (i == ARRAY_SIZE(latency_x_axis_us)) {
1767 /* Overflowed the histogram */
1768 if (read)
1769 s->latency_y_axis_read[i]++;
1770 else
1771 s->latency_y_axis_write[i]++;
1772 }
1773 if (read)
1774 s->latency_reads_elems++;
1775 else
1776 s->latency_writes_elems++;
1777}
1778
1779void blk_zero_latency_hist(struct io_latency_state *s);
1780ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
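
/*
 * Illustrative sketch (assumption): a driver stamps each command when it is
 * issued and feeds the completion delta into the histogram. cmd->issue_time
 * and host->io_lat_s are hypothetical driver fields.
 *
 *	u_int64_t delta_us = ktime_us_delta(ktime_get(), cmd->issue_time);
 *
 *	blk_update_latency_hist(&host->io_lat_s,
 *				rq_data_dir(cmd->request) == READ,
 *				delta_us);
 *
 * blk_latency_hist_show() formats the accumulated buckets for a sysfs show
 * method, and blk_zero_latency_hist() resets them.
 */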
1781
David Howells93614012006-09-30 20:45:40 +02001782#else /* CONFIG_BLOCK */
Fabian Frederickac13a822014-06-04 16:06:27 -07001783
1784struct block_device;
1785
David Howells93614012006-09-30 20:45:40 +02001786/*
1787 * stubs for when the block layer is configured out
1788 */
1789#define buffer_heads_over_limit 0
1790
David Howells93614012006-09-30 20:45:40 +02001791static inline long nr_blockdev_pages(void)
1792{
1793 return 0;
1794}
1795
Jens Axboe1f940bd2011-03-11 20:17:08 +01001796struct blk_plug {
1797};
1798
1799static inline void blk_start_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001800{
1801}
1802
Jens Axboe1f940bd2011-03-11 20:17:08 +01001803static inline void blk_finish_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001804{
1805}
1806
Jens Axboe1f940bd2011-03-11 20:17:08 +01001807static inline void blk_flush_plug(struct task_struct *task)
Jens Axboe73c10102011-03-08 13:19:51 +01001808{
1809}
1810
Jens Axboea237c1c2011-04-16 13:27:55 +02001811static inline void blk_schedule_flush_plug(struct task_struct *task)
1812{
1813}
 1814
Jens Axboe73c10102011-03-08 13:19:51 +01001816static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1817{
1818 return false;
1819}
1820
Fabian Frederickac13a822014-06-04 16:06:27 -07001821static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1822 sector_t *error_sector)
1823{
1824 return 0;
1825}
1826
David Howells93614012006-09-30 20:45:40 +02001827#endif /* CONFIG_BLOCK */
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829#endif