#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;

	ktime_t			lat_hist_io_start;
	int			lat_hist_enabled;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)
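
/*
 * Example (illustrative sketch, not a kernel interface): a driver building
 * a synchronous write request would typically pack the operation and flags
 * with the helpers above and read the op back with req_op().
 * submit_to_hw() stands in for a hypothetical driver routine:
 *
 *	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
 *	if (op_is_write(req_op(rq)))
 *		submit_to_hw(rq);
 */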

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
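
/*
 * Example (illustrative sketch): a driver's rq_timed_out_fn picks one of
 * the blk_eh_timer_return values above.  hw_still_busy() stands in for a
 * hypothetical device-specific check:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (hw_still_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_HANDLED;
 *	}
 */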

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_FAST		27	/* fast block device (e.g. ram based) */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
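
/*
 * Example (illustrative sketch): a driver that knows its device has no
 * seek penalty would mark the queue non-rotational while setting it up,
 * either with the locked variant under q->queue_lock or with the
 * _unlocked variant before the queue is visible to anyone else:
 *
 *	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 */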

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};
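
/*
 * Example (illustrative sketch): a prep_rq_fn returns one of the values
 * above; BLKPREP_DEFER leaves the request on the queue to be retried
 * later.  my_alloc_cmd() stands in for a hypothetical driver helper:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_alloc_cmd(rq))
 *			return BLKPREP_DEFER;
 *		return BLKPREP_OK;
 *	}
 */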

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
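
/*
 * Example (illustrative sketch): a typical driver walk over all data
 * segments of a request.  copy_segment() stands in for a hypothetical
 * per-segment handler:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		copy_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */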

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_recalc_rq_segments(struct request *rq);
extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}
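
/*
 * Example (illustrative sketch): stacking drivers and filesystems reach
 * queue properties through the block device.  issue_discards() stands in
 * for a hypothetical caller-side helper:
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *
 *	if (blk_queue_discard(q))
 *		issue_discards(bdev);
 */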

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
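
/*
 * Example (illustrative sketch): a request-based driver normally programs
 * its hardware from the accessors above rather than touching __sector or
 * __data_len directly.  hw_submit() stands in for a hypothetical helper:
 *
 *	hw_submit(dev, blk_rq_pos(rq), blk_rq_sectors(rq), rq_data_dir(rq));
 */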

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
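
/*
 * For example, with chunk_sectors = 128 and offset = 100 the room left in
 * the current chunk is 128 - (100 & 127) = 28 sectors, so a file system
 * request starting there is capped at 28 sectors (and further limited by
 * max_sectors in blk_rq_get_max_sectors() below).
 */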

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);
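
/*
 * Example (illustrative sketch): a completion path typically ends the
 * request for however many bytes the hardware just finished.
 * blk_end_request() returns true while the request still has pending
 * bytes and false once it has been fully completed.  bytes_done and
 * hw_start_next_request() are hypothetical driver-side names:
 *
 *	if (!blk_end_request(rq, error, bytes_done))
 *		hw_start_next_request(dev);
 */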
Tejun Heo2e60e022009-04-23 11:05:18 +0900975
Jens Axboeff856ba2006-01-09 16:02:34 +0100976extern void blk_complete_request(struct request *);
Jens Axboe242f9dc2008-09-14 05:55:09 -0700977extern void __blk_complete_request(struct request *);
978extern void blk_abort_request(struct request *);
James Bottomley28018c22010-07-01 19:49:17 +0900979extern void blk_unprep_request(struct request *);
Jens Axboeff856ba2006-01-09 16:02:34 +0100980
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 * Access functions for manipulating queue properties
983 */
Jens Axboe165125e2007-07-24 09:28:11 +0200984extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
Christoph Lameter19460892005-06-23 00:08:19 -0700985 spinlock_t *lock, int node_id);
Jens Axboe165125e2007-07-24 09:28:11 +0200986extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
Mike Snitzer01effb02010-05-11 08:57:42 +0200987extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
988 request_fn_proc *, spinlock_t *);
Jens Axboe165125e2007-07-24 09:28:11 +0200989extern void blk_cleanup_queue(struct request_queue *);
990extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
991extern void blk_queue_bounce_limit(struct request_queue *, u64);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -0500992extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
Jens Axboe762380a2014-06-05 13:38:39 -0600993extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
Martin K. Petersen8a783622010-02-26 00:20:39 -0500994extern void blk_queue_max_segments(struct request_queue *, unsigned short);
Jens Axboe165125e2007-07-24 09:28:11 +0200995extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
Christoph Hellwig67efc922009-09-30 13:54:20 +0200996extern void blk_queue_max_discard_sectors(struct request_queue *q,
997 unsigned int max_discard_sectors);
Martin K. Petersen4363ac72012-09-18 12:19:27 -0400998extern void blk_queue_max_write_same_sectors(struct request_queue *q,
999 unsigned int max_write_same_sectors);
Martin K. Petersene1defc42009-05-22 17:17:49 -04001000extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001001extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001002extern void blk_queue_alignment_offset(struct request_queue *q,
1003 unsigned int alignment);
Martin K. Petersen7c958e32009-07-31 11:49:11 -04001004extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001005extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
Martin K. Petersen3c5820c2009-09-11 21:54:52 +02001006extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001007extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
Martin K. Petersene475bba2009-06-16 08:23:52 +02001008extern void blk_set_default_limits(struct queue_limits *lim);
Martin K. Petersenb1bd0552012-01-11 16:27:11 +01001009extern void blk_set_stacking_limits(struct queue_limits *lim);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001010extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1011 sector_t offset);
Martin K. Petersen17be8c22010-01-11 03:21:49 -05001012extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1013 sector_t offset);
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001014extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1015 sector_t offset);
Jens Axboe165125e2007-07-24 09:28:11 +02001016extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
Tejun Heoe3790c72008-03-04 11:18:17 +01001017extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
FUJITA Tomonori27f82212008-07-04 09:30:03 +02001018extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
Tejun Heo2fb98e82008-02-19 11:36:53 +01001019extern int blk_queue_dma_drain(struct request_queue *q,
1020 dma_drain_needed_fn *dma_drain_needed,
1021 void *buf, unsigned int size);
Kiyoshi Uedaef9e3fa2008-10-01 16:12:15 +02001022extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
Jens Axboe165125e2007-07-24 09:28:11 +02001023extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
Keith Busch03100aa2015-08-19 14:24:05 -07001024extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
Jens Axboe165125e2007-07-24 09:28:11 +02001025extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
James Bottomley28018c22010-07-01 19:49:17 +09001026extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
Jens Axboe165125e2007-07-24 09:28:11 +02001027extern void blk_queue_dma_alignment(struct request_queue *, int);
James Bottomley11c3e682007-12-31 16:37:00 -06001028extern void blk_queue_update_dma_alignment(struct request_queue *, int);
Jens Axboe165125e2007-07-24 09:28:11 +02001029extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
Jens Axboe242f9dc2008-09-14 05:55:09 -07001030extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1031extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001032extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
Jens Axboe93e9d8e2016-04-12 12:32:46 -06001033extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
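/*
 * Example (illustrative sketch, values are made up): a driver normally
 * programs these limits from its probe path once the device geometry is
 * known, e.g. for a 4K-native device with a volatile write cache:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_io_min(q, 4096);
 *	blk_queue_io_opt(q, 128 * 1024);
 *	blk_queue_write_cache(q, true, true);
 */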
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
Jens Axboe165125e2007-07-24 09:28:11 +02001036extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
AnilKumar Chimata7214d7e2017-06-23 03:09:59 -07001037extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
1038 struct scatterlist *sglist);
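/*
 * Example (illustrative sketch): a driver maps a request onto a
 * scatterlist it has allocated for up to queue_max_segments(q) entries;
 * 'sgl' and 'nents' below are driver-owned, not part of this API:
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 */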
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039extern void blk_dump_rq_flags(struct request *, char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040extern long nr_blockdev_pages(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041
Tejun Heo09ac46c2011-12-14 00:33:38 +01001042bool __must_check blk_get_queue(struct request_queue *);
Jens Axboe165125e2007-07-24 09:28:11 +02001043struct request_queue *blk_alloc_queue(gfp_t);
1044struct request_queue *blk_alloc_queue_node(gfp_t, int);
1045extern void blk_put_queue(struct request_queue *);
Jens Axboe3f21c262015-06-05 10:57:37 -06001046extern void blk_set_queue_dying(struct request_queue *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047
Shaohua Li316cc672011-07-08 08:19:21 +02001048/*
Lin Ming6c954662013-03-23 11:42:26 +08001049 * Block layer runtime PM functions
1050 */
Rafael J. Wysocki47fafbc2014-12-04 01:00:23 +01001051#ifdef CONFIG_PM
Lin Ming6c954662013-03-23 11:42:26 +08001052extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1053extern int blk_pre_runtime_suspend(struct request_queue *q);
1054extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1055extern void blk_pre_runtime_resume(struct request_queue *q);
1056extern void blk_post_runtime_resume(struct request_queue *q, int err);
Mika Westerbergd07ab6d2016-02-18 10:54:11 +02001057extern void blk_set_runtime_active(struct request_queue *q);
Lin Ming6c954662013-03-23 11:42:26 +08001058#else
1059static inline void blk_pm_runtime_init(struct request_queue *q,
1060 struct device *dev) {}
1061static inline int blk_pre_runtime_suspend(struct request_queue *q)
1062{
1063 return -ENOSYS;
1064}
1065static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1066static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1067static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
Tobias Klauserf99694c2016-11-18 15:16:06 +01001068static inline void blk_set_runtime_active(struct request_queue *q) {}
Lin Ming6c954662013-03-23 11:42:26 +08001069#endif
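/*
 * Example (illustrative sketch): the LLD ties its queue to the device
 * during probe and then brackets its runtime_suspend callback with the
 * pre/post helpers; "<suspend the hardware>" stands in for the
 * driver-specific work:
 *
 *	blk_pm_runtime_init(q, dev);
 *
 *	ret = blk_pre_runtime_suspend(q);
 *	if (ret)
 *		return ret;
 *	ret = <suspend the hardware>;
 *	blk_post_runtime_suspend(q, ret);
 *	return ret;
 */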
1070
1071/*
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001072 * blk_plug permits building a queue of related requests by holding the I/O
1073 * fragments for a short period. This allows merging of sequential requests
1074 * into a single larger request. As the requests are moved from a per-task list to
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001075 * the device's request_queue in a batch, this improves scalability
1076 * because contention on the request_queue lock is reduced.
1077 *
1078 * It is OK not to disable preemption when adding the request to the plug list
1079 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1080 * the plug list when the task sleeps by itself. For details, please see
1081 * schedule() where blk_schedule_flush_plug() is called.
Shaohua Li316cc672011-07-08 08:19:21 +02001082 */
Jens Axboe73c10102011-03-08 13:19:51 +01001083struct blk_plug {
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001084 struct list_head list; /* requests */
Jens Axboe320ae512013-10-24 09:20:05 +01001085 struct list_head mq_list; /* blk-mq requests */
Suresh Jayaraman75df7132011-09-21 10:00:16 +02001086 struct list_head cb_list; /* md requires an unplug callback */
Jens Axboe73c10102011-03-08 13:19:51 +01001087};
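/*
 * Example (illustrative sketch): a submitter batches related bios
 * inside a plug so they can be merged before reaching the device;
 * 'bio' here is whatever the caller has built:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio);		(repeated for the whole batch)
 *	blk_finish_plug(&plug);
 */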
Shaohua Li55c022b2011-07-08 08:19:20 +02001088#define BLK_MAX_REQUEST_COUNT 16
1089
NeilBrown9cbb1752012-07-31 09:08:14 +02001090struct blk_plug_cb;
NeilBrown74018dc2012-07-31 09:08:15 +02001091typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
NeilBrown048c9372011-04-18 09:52:22 +02001092struct blk_plug_cb {
1093 struct list_head list;
NeilBrown9cbb1752012-07-31 09:08:14 +02001094 blk_plug_cb_fn callback;
1095 void *data;
NeilBrown048c9372011-04-18 09:52:22 +02001096};
NeilBrown9cbb1752012-07-31 09:08:14 +02001097extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1098 void *data, int size);
Jens Axboe73c10102011-03-08 13:19:51 +01001099extern void blk_start_plug(struct blk_plug *);
1100extern void blk_finish_plug(struct blk_plug *);
Jens Axboef6603782011-04-15 15:49:07 +02001101extern void blk_flush_plug_list(struct blk_plug *, bool);
Jens Axboe73c10102011-03-08 13:19:51 +01001102
1103static inline void blk_flush_plug(struct task_struct *tsk)
1104{
1105 struct blk_plug *plug = tsk->plug;
1106
Christoph Hellwig88b996c2011-04-15 15:20:10 +02001107 if (plug)
Jens Axboea237c1c2011-04-16 13:27:55 +02001108 blk_flush_plug_list(plug, false);
1109}
1110
1111static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1112{
1113 struct blk_plug *plug = tsk->plug;
1114
1115 if (plug)
Jens Axboef6603782011-04-15 15:49:07 +02001116 blk_flush_plug_list(plug, true);
Jens Axboe73c10102011-03-08 13:19:51 +01001117}
1118
1119static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1120{
1121 struct blk_plug *plug = tsk->plug;
1122
Jens Axboe320ae512013-10-24 09:20:05 +01001123 return plug &&
1124 (!list_empty(&plug->list) ||
1125 !list_empty(&plug->mq_list) ||
1126 !list_empty(&plug->cb_list));
Jens Axboe73c10102011-03-08 13:19:51 +01001127}
1128
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129/*
1130 * Request tagging (legacy tag-map helpers for the single-queue path)
1131 */
Jens Axboe165125e2007-07-24 09:28:11 +02001132extern int blk_queue_start_tag(struct request_queue *, struct request *);
1133extern struct request *blk_queue_find_tag(struct request_queue *, int);
1134extern void blk_queue_end_tag(struct request_queue *, struct request *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001135extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
Jens Axboe165125e2007-07-24 09:28:11 +02001136extern void blk_queue_free_tags(struct request_queue *);
1137extern int blk_queue_resize_tags(struct request_queue *, int);
1138extern void blk_queue_invalidate_tags(struct request_queue *);
Shaohua Liee1b6f72015-01-15 17:32:25 -08001139extern struct blk_queue_tag *blk_init_tags(int, int);
James Bottomley492dfb42006-08-30 15:48:45 -04001140extern void blk_free_tags(struct blk_queue_tag *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
David C Somayajuluf583f492006-10-04 08:27:25 +02001142static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1143 int tag)
1144{
1145 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1146 return NULL;
1147 return bqt->tag_index[tag];
1148}
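/*
 * Example (illustrative sketch, assuming the BLK_TAG_ALLOC_FIFO policy
 * constant from this header): a legacy tagged driver sizes its tag map
 * at init time and tags each request as it is started:
 *
 *	blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_FIFO);
 *	...
 *	if (blk_queue_start_tag(q, rq))
 *		return;			(no tag free, retry later)
 */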
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001149
Christoph Hellwige950fdf2016-07-19 11:23:33 +02001150
1151#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
1152#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
Christoph Hellwigdd3932e2010-09-16 20:51:46 +02001153
1154extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
Dmitry Monakhovfbd9b092010-04-28 17:55:06 +04001155extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1156 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
Christoph Hellwig38f25252016-04-16 14:55:28 -04001157extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
Christoph Hellwig288dab82016-06-09 16:00:36 +02001158 sector_t nr_sects, gfp_t gfp_mask, int flags,
Mike Christie469e3212016-06-05 14:31:49 -05001159 struct bio **biop);
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001160extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1161 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
Dmitry Monakhov3f14d792010-04-28 17:55:09 +04001162extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001163 sector_t nr_sects, gfp_t gfp_mask, bool discard);
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001164static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1165 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
David Woodhousefb2dce82008-08-05 18:01:53 +01001166{
Christoph Hellwig2cf6d262010-08-18 05:29:10 -04001167 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1168 nr_blocks << (sb->s_blocksize_bits - 9),
1169 gfp_mask, flags);
David Woodhousefb2dce82008-08-05 18:01:53 +01001170}
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001171static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
Theodore Ts'oa107e5a2010-10-27 23:44:47 -04001172 sector_t nr_blocks, gfp_t gfp_mask)
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001173{
1174 return blkdev_issue_zeroout(sb->s_bdev,
1175 block << (sb->s_blocksize_bits - 9),
1176 nr_blocks << (sb->s_blocksize_bits - 9),
Martin K. Petersend93ba7a2015-01-20 20:06:30 -05001177 gfp_mask, true);
Lukas Czernere6fa0be2010-10-27 21:30:04 -04001178}
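/*
 * Example (illustrative sketch): zeroing a range directly on a block
 * device, allowing the block layer to use discard when the device
 * guarantees that discarded blocks read back as zeroes:
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);
 */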
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Jens Axboe018e0442009-06-26 16:27:10 +02001180extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
Adel Gadllah0b07de82008-06-26 13:48:27 +02001181
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001182enum blk_default_limits {
1183 BLK_MAX_SEGMENTS = 128,
1184 BLK_SAFE_MAX_SECTORS = 255,
Jeff Moyerd2be5372015-08-13 14:57:57 -04001185 BLK_DEF_MAX_SECTORS = 2560,
Martin K. Peterseneb28d312010-02-26 00:20:37 -05001186 BLK_MAX_SEGMENT_SIZE = 65536,
1187 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1188};
Milan Broz0e435ac2008-12-03 12:55:08 +01001189
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1191
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001192static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1193{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001194 return q->limits.bounce_pfn;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001195}
1196
1197static inline unsigned long queue_segment_boundary(struct request_queue *q)
1198{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001199 return q->limits.seg_boundary_mask;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001200}
1201
Keith Busch03100aa2015-08-19 14:24:05 -07001202static inline unsigned long queue_virt_boundary(struct request_queue *q)
1203{
1204 return q->limits.virt_boundary_mask;
1205}
1206
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001207static inline unsigned int queue_max_sectors(struct request_queue *q)
1208{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001209 return q->limits.max_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001210}
1211
1212static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1213{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001214 return q->limits.max_hw_sectors;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001215}
1216
Martin K. Petersen8a783622010-02-26 00:20:39 -05001217static inline unsigned short queue_max_segments(struct request_queue *q)
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001218{
Martin K. Petersen8a783622010-02-26 00:20:39 -05001219 return q->limits.max_segments;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001220}
1221
1222static inline unsigned int queue_max_segment_size(struct request_queue *q)
1223{
Martin K. Petersen025146e2009-05-22 17:17:51 -04001224 return q->limits.max_segment_size;
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001225}
1226
Martin K. Petersene1defc42009-05-22 17:17:49 -04001227static inline unsigned short queue_logical_block_size(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228{
1229 int retval = 512;
1230
Martin K. Petersen025146e2009-05-22 17:17:51 -04001231 if (q && q->limits.logical_block_size)
1232 retval = q->limits.logical_block_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
1234 return retval;
1235}
1236
Martin K. Petersene1defc42009-05-22 17:17:49 -04001237static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238{
Martin K. Petersene1defc42009-05-22 17:17:49 -04001239 return queue_logical_block_size(bdev_get_queue(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240}
1241
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001242static inline unsigned int queue_physical_block_size(struct request_queue *q)
1243{
1244 return q->limits.physical_block_size;
1245}
1246
Martin K. Petersen892b6f92010-10-13 21:18:03 +02001247static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
Martin K. Petersenac481c22009-10-03 20:52:01 +02001248{
1249 return queue_physical_block_size(bdev_get_queue(bdev));
1250}
1251
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001252static inline unsigned int queue_io_min(struct request_queue *q)
1253{
1254 return q->limits.io_min;
1255}
1256
Martin K. Petersenac481c22009-10-03 20:52:01 +02001257static inline int bdev_io_min(struct block_device *bdev)
1258{
1259 return queue_io_min(bdev_get_queue(bdev));
1260}
1261
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001262static inline unsigned int queue_io_opt(struct request_queue *q)
1263{
1264 return q->limits.io_opt;
1265}
1266
Martin K. Petersenac481c22009-10-03 20:52:01 +02001267static inline int bdev_io_opt(struct block_device *bdev)
1268{
1269 return queue_io_opt(bdev_get_queue(bdev));
1270}
1271
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001272static inline int queue_alignment_offset(struct request_queue *q)
1273{
Martin K. Petersenac481c22009-10-03 20:52:01 +02001274 if (q->limits.misaligned)
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001275 return -1;
1276
Martin K. Petersenac481c22009-10-03 20:52:01 +02001277 return q->limits.alignment_offset;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001278}
1279
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001280static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001281{
1282 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
Mike Snitzerb8839b82014-10-08 18:26:13 -04001283 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
Martin K. Petersen81744ee2009-12-29 08:35:35 +01001284
Mike Snitzerb8839b82014-10-08 18:26:13 -04001285 return (granularity + lim->alignment_offset - alignment) % granularity;
Martin K. Petersenc72758f2009-05-22 17:17:53 -04001286}
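/*
 * Worked example (illustrative): with physical_block_size = io_min =
 * 4096 and alignment_offset = 0, granularity is 4096 bytes (8 sectors).
 * For sector 7 the remainder is 7 sectors = 3584 bytes, so the function
 * returns (4096 + 0 - 3584) % 4096 = 512, i.e. the I/O starts 512 bytes
 * short of the next naturally aligned boundary.
 */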
1287
Martin K. Petersenac481c22009-10-03 20:52:01 +02001288static inline int bdev_alignment_offset(struct block_device *bdev)
1289{
1290 struct request_queue *q = bdev_get_queue(bdev);
1291
1292 if (q->limits.misaligned)
1293 return -1;
1294
1295 if (bdev != bdev->bd_contains)
1296 return bdev->bd_part->alignment_offset;
1297
1298 return q->limits.alignment_offset;
1299}
1300
Martin K. Petersen86b37282009-11-10 11:50:21 +01001301static inline int queue_discard_alignment(struct request_queue *q)
1302{
1303 if (q->limits.discard_misaligned)
1304 return -1;
1305
1306 return q->limits.discard_alignment;
1307}
1308
Martin K. Petersene03a72e2010-01-11 03:21:51 -05001309static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
Martin K. Petersen86b37282009-11-10 11:50:21 +01001310{
Linus Torvalds59771072012-12-19 07:18:35 -08001311 unsigned int alignment, granularity, offset;
Martin K. Petersendd3d1452010-01-11 03:21:48 -05001312
Martin K. Petersena934a002011-05-18 10:37:35 +02001313 if (!lim->max_discard_sectors)
1314 return 0;
1315
Linus Torvalds59771072012-12-19 07:18:35 -08001316	/* The discard limits are stored in bytes; convert them to sectors */
1317 alignment = lim->discard_alignment >> 9;
1318 granularity = lim->discard_granularity >> 9;
1319 if (!granularity)
1320 return 0;
1321
1322 /* Offset of the partition start in 'granularity' sectors */
1323 offset = sector_div(sector, granularity);
1324
1325	/* Sectors from here to the next boundary that satisfies the alignment */
1326 offset = (granularity + alignment - offset) % granularity;
1327
1328	/* Convert the result back to bytes */
1329 return offset << 9;
Martin K. Petersen86b37282009-11-10 11:50:21 +01001330}
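/*
 * Worked example (illustrative): with discard_granularity = 1 MiB
 * (2048 sectors), discard_alignment = 0 and a partition starting at
 * sector 63, offset becomes 63, so the result is
 * (2048 + 0 - 63) % 2048 = 1985 sectors, returned as 1985 << 9 bytes:
 * the distance to the next discard-aligned boundary.
 */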
1331
Paolo Bonzinic6e66632012-08-02 09:48:50 +02001332static inline int bdev_discard_alignment(struct block_device *bdev)
1333{
1334 struct request_queue *q = bdev_get_queue(bdev);
1335
1336 if (bdev != bdev->bd_contains)
1337 return bdev->bd_part->discard_alignment;
1338
1339 return q->limits.discard_alignment;
1340}
1341
Martin K. Petersen98262f22009-12-03 09:24:48 +01001342static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1343{
Martin K. Petersena934a002011-05-18 10:37:35 +02001344 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
Martin K. Petersen98262f22009-12-03 09:24:48 +01001345 return 1;
1346
1347 return 0;
1348}
1349
1350static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1351{
1352 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1353}
1354
Martin K. Petersen4363ac72012-09-18 12:19:27 -04001355static inline unsigned int bdev_write_same(struct block_device *bdev)
1356{
1357 struct request_queue *q = bdev_get_queue(bdev);
1358
1359 if (q)
1360 return q->limits.max_write_same_sectors;
1361
1362 return 0;
1363}
1364
Jens Axboe165125e2007-07-24 09:28:11 +02001365static inline int queue_dma_alignment(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366{
Pete Wyckoff482eb682008-01-01 10:23:02 -05001367 return q ? q->dma_alignment : 511;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368}
1369
Namhyung Kim14417792010-09-15 13:08:27 +02001370static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
FUJITA Tomonori87904072008-08-28 15:05:58 +09001371 unsigned int len)
1372{
1373 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
Namhyung Kim14417792010-09-15 13:08:27 +02001374 return !(addr & alignment) && !(len & alignment);
FUJITA Tomonori87904072008-08-28 15:05:58 +09001375}
1376
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377/* assumes size > 256 */
1378static inline unsigned int blksize_bits(unsigned int size)
1379{
1380 unsigned int bits = 8;
1381 do {
1382 bits++;
1383 size >>= 1;
1384 } while (size > 256);
1385 return bits;
1386}
1387
Adrian Bunk2befb9e2005-09-10 00:27:17 -07001388static inline unsigned int block_size(struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389{
1390 return bdev->bd_block_size;
1391}
1392
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001393static inline bool queue_flush_queueable(struct request_queue *q)
1394{
Jens Axboec888a8f2016-04-13 13:33:19 -06001395 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
shaohua.li@intel.comf3876932011-05-06 11:34:32 -06001396}
1397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398typedef struct {struct page *v;} Sector;
1399
1400unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1401
1402static inline void put_dev_sector(Sector p)
1403{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001404 put_page(p.v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405}
1406
Ming Leie0af2912016-02-26 23:40:51 +08001407static inline bool __bvec_gap_to_prev(struct request_queue *q,
1408 struct bio_vec *bprv, unsigned int offset)
1409{
1410 return offset ||
1411 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1412}
1413
Keith Busch03100aa2015-08-19 14:24:05 -07001414/*
1415 * Check if adding a bio_vec after bprv with offset would create a gap in
1416 * the SG list. Most drivers don't care about this, but some do.
1417 */
1418static inline bool bvec_gap_to_prev(struct request_queue *q,
1419 struct bio_vec *bprv, unsigned int offset)
1420{
1421 if (!queue_virt_boundary(q))
1422 return false;
Ming Leie0af2912016-02-26 23:40:51 +08001423 return __bvec_gap_to_prev(q, bprv, offset);
Keith Busch03100aa2015-08-19 14:24:05 -07001424}
1425
Jens Axboe5e7c4272015-09-03 19:28:20 +03001426static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1427 struct bio *next)
1428{
Ming Lei25e71a92016-02-26 23:40:52 +08001429 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1430 struct bio_vec pb, nb;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001431
Ming Lei25e71a92016-02-26 23:40:52 +08001432 bio_get_last_bvec(prev, &pb);
1433 bio_get_first_bvec(next, &nb);
1434
1435 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1436 }
1437
1438 return false;
Jens Axboe5e7c4272015-09-03 19:28:20 +03001439}
1440
1441static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1442{
1443 return bio_will_gap(req->q, req->biotail, bio);
1444}
1445
1446static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1447{
1448 return bio_will_gap(req->q, bio, req->bio);
1449}
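/*
 * Example (illustrative sketch): a driver whose hardware cannot handle
 * SG elements that straddle an internal page advertises that with
 *
 *	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 *
 * after which the gap checks above reject merges that would violate the
 * boundary (PAGE_SIZE stands in for the device's own page size).
 */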
1450
Jens Axboe59c3d452014-04-08 09:15:35 -06001451int kblockd_schedule_work(struct work_struct *work);
Jens Axboeee63cfa2016-08-24 15:52:48 -06001452int kblockd_schedule_work_on(int cpu, struct work_struct *work);
Jens Axboe59c3d452014-04-08 09:15:35 -06001453int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
Jens Axboe8ab14592014-04-08 09:17:40 -06001454int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Divyesh Shah91952912010-04-01 15:01:41 -07001456#ifdef CONFIG_BLK_CGROUP
Jens Axboe28f41972010-06-01 12:23:18 +02001457/*
1458 * This should not be using sched_clock(). A real patch is in progress
1459 * to fix this up; until that is in place we need to disable preemption
1460 * around sched_clock() in this function and set_io_start_time_ns().
1461 */
Divyesh Shah91952912010-04-01 15:01:41 -07001462static inline void set_start_time_ns(struct request *req)
1463{
Jens Axboe28f41972010-06-01 12:23:18 +02001464 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001465 req->start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001466 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001467}
1468
1469static inline void set_io_start_time_ns(struct request *req)
1470{
Jens Axboe28f41972010-06-01 12:23:18 +02001471 preempt_disable();
Divyesh Shah91952912010-04-01 15:01:41 -07001472 req->io_start_time_ns = sched_clock();
Jens Axboe28f41972010-06-01 12:23:18 +02001473 preempt_enable();
Divyesh Shah91952912010-04-01 15:01:41 -07001474}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001475
1476static inline uint64_t rq_start_time_ns(struct request *req)
1477{
1478 return req->start_time_ns;
1479}
1480
1481static inline uint64_t rq_io_start_time_ns(struct request *req)
1482{
1483 return req->io_start_time_ns;
1484}
Divyesh Shah91952912010-04-01 15:01:41 -07001485#else
1486static inline void set_start_time_ns(struct request *req) {}
1487static inline void set_io_start_time_ns(struct request *req) {}
Divyesh Shah84c124d2010-04-09 08:31:19 +02001488static inline uint64_t rq_start_time_ns(struct request *req)
1489{
1490 return 0;
1491}
1492static inline uint64_t rq_io_start_time_ns(struct request *req)
1493{
1494 return 0;
1495}
Divyesh Shah91952912010-04-01 15:01:41 -07001496#endif
1497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1499 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1500#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1501 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1502
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001503#if defined(CONFIG_BLK_DEV_INTEGRITY)
1504
Martin K. Petersen8288f492014-09-26 19:20:02 -04001505enum blk_integrity_flags {
1506 BLK_INTEGRITY_VERIFY = 1 << 0,
1507 BLK_INTEGRITY_GENERATE = 1 << 1,
Martin K. Petersen3aec2f42014-09-26 19:20:03 -04001508 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
Martin K. Petersenaae7df52014-09-26 19:20:05 -04001509 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
Martin K. Petersen8288f492014-09-26 19:20:02 -04001510};
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001511
Martin K. Petersen18593082014-09-26 19:20:01 -04001512struct blk_integrity_iter {
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001513 void *prot_buf;
1514 void *data_buf;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001515 sector_t seed;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001516 unsigned int data_size;
Martin K. Petersen3be91c42014-09-26 19:19:59 -04001517 unsigned short interval;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001518 const char *disk_name;
1519};
1520
Martin K. Petersen18593082014-09-26 19:20:01 -04001521typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001522
Martin K. Petersen0f8087e2015-10-21 13:19:33 -04001523struct blk_integrity_profile {
1524 integrity_processing_fn *generate_fn;
1525 integrity_processing_fn *verify_fn;
1526 const char *name;
1527};
1528
Martin K. Petersen25520d52015-10-21 13:19:49 -04001529extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001530extern void blk_integrity_unregister(struct gendisk *);
Martin K. Petersenad7fce92008-10-01 03:38:39 -04001531extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001532extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1533 struct scatterlist *);
1534extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001535extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1536 struct request *);
1537extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1538 struct bio *);
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001539
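/*
 * Example (illustrative sketch, assuming the T10-PI profile and tuple
 * type from <linux/t10-pi.h>): a driver exposing Type 1 protection
 * registers a template against its gendisk; the template fields are
 * copied, so it may live on the stack:
 *
 *	struct blk_integrity bi = {
 *		.profile    = &t10_pi_type1_crc,
 *		.tuple_size = sizeof(struct t10_pi_tuple),
 *		.flags      = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 */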
Martin K. Petersen25520d52015-10-21 13:19:49 -04001540static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1541{
Dan Williamsac6fc482015-10-21 13:20:18 -04001542 struct blk_integrity *bi = &disk->queue->integrity;
Martin K. Petersen25520d52015-10-21 13:19:49 -04001543
1544 if (!bi->profile)
1545 return NULL;
1546
1547 return bi;
1548}
1549
Jens Axboeb04accc2008-10-02 12:53:22 +02001550static inline
1551struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1552{
Martin K. Petersen25520d52015-10-21 13:19:49 -04001553 return blk_get_integrity(bdev->bd_disk);
Martin K. Petersenb02739b2008-10-02 18:47:49 +02001554}
1555
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001556static inline bool blk_integrity_rq(struct request *rq)
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001557{
Martin K. Petersen180b2f92014-09-26 19:19:56 -04001558 return rq->cmd_flags & REQ_INTEGRITY;
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001559}
1560
Martin K. Petersen13f05c82010-09-10 20:50:10 +02001561static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1562 unsigned int segs)
1563{
1564 q->limits.max_integrity_segments = segs;
1565}
1566
1567static inline unsigned short
1568queue_max_integrity_segments(struct request_queue *q)
1569{
1570 return q->limits.max_integrity_segments;
1571}
1572
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001573static inline bool integrity_req_gap_back_merge(struct request *req,
1574 struct bio *next)
1575{
1576 struct bio_integrity_payload *bip = bio_integrity(req->bio);
1577 struct bio_integrity_payload *bip_next = bio_integrity(next);
1578
1579 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1580 bip_next->bip_vec[0].bv_offset);
1581}
1582
1583static inline bool integrity_req_gap_front_merge(struct request *req,
1584 struct bio *bio)
1585{
1586 struct bio_integrity_payload *bip = bio_integrity(bio);
1587 struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1588
1589 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1590 bip_next->bip_vec[0].bv_offset);
1591}
1592
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001593#else /* CONFIG_BLK_DEV_INTEGRITY */
1594
Stephen Rothwellfd832402012-01-12 09:17:30 +01001595struct bio;
1596struct block_device;
1597struct gendisk;
1598struct blk_integrity;
1599
1600static inline int blk_integrity_rq(struct request *rq)
1601{
1602 return 0;
1603}
1604static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1605 struct bio *b)
1606{
1607 return 0;
1608}
1609static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1610 struct bio *b,
1611 struct scatterlist *s)
1612{
1613 return 0;
1614}
1615static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1616{
Michele Curti61a04e52014-10-09 15:30:17 -07001617 return NULL;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001618}
1619static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1620{
1621 return NULL;
1622}
1623static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1624{
1625 return 0;
1626}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001627static inline void blk_integrity_register(struct gendisk *d,
Stephen Rothwellfd832402012-01-12 09:17:30 +01001628 struct blk_integrity *b)
1629{
Stephen Rothwellfd832402012-01-12 09:17:30 +01001630}
1631static inline void blk_integrity_unregister(struct gendisk *d)
1632{
1633}
1634static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1635 unsigned int segs)
1636{
1637}
1638static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1639{
1640 return 0;
1641}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001642static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1643 struct request *r1,
1644 struct request *r2)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001645{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001646 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001647}
Martin K. Petersen4eaf99b2014-09-26 19:20:06 -04001648static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1649 struct request *r,
1650 struct bio *b)
Stephen Rothwellfd832402012-01-12 09:17:30 +01001651{
Martin K. Petersencb1a5ab2014-10-28 20:27:43 -06001652 return true;
Stephen Rothwellfd832402012-01-12 09:17:30 +01001653}
Martin K. Petersen25520d52015-10-21 13:19:49 -04001654
Sagi Grimberg7f39add2015-09-11 09:03:04 -06001655static inline bool integrity_req_gap_back_merge(struct request *req,
1656 struct bio *next)
1657{
1658 return false;
1659}
1660static inline bool integrity_req_gap_front_merge(struct request *req,
1661 struct bio *bio)
1662{
1663 return false;
1664}
Martin K. Petersen7ba1ba12008-06-30 20:04:41 +02001665
1666#endif /* CONFIG_BLK_DEV_INTEGRITY */
1667
Dan Williamsb2e0d162016-01-15 16:55:59 -08001668/**
1669 * struct blk_dax_ctl - control and output parameters for ->direct_access
1670 * @sector: (input) offset relative to a block_device
1671 * @addr: (output) kernel virtual address for @sector populated by driver
1672 * @pfn: (output) page frame number for @addr populated by driver
1673 * @size: (input) number of bytes requested
1674 */
1675struct blk_dax_ctl {
1676 sector_t sector;
Dan Williams7a9eb202016-06-03 18:06:47 -07001677 void *addr;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001678 long size;
Dan Williams34c0fd52016-01-15 16:56:14 -08001679 pfn_t pfn;
Dan Williamsb2e0d162016-01-15 16:55:59 -08001680};
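/*
 * Example (illustrative sketch): translating one 512-byte sector of a
 * DAX-capable device into a kernel address; the caller fills in
 * 'sector' and 'size', the driver fills in 'addr' and 'pfn':
 *
 *	struct blk_dax_ctl dax = { .sector = sector, .size = 512 };
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail > 0)
 *		memcpy(buf, dax.addr, 512);
 */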
1681
Al Viro08f85852007-10-08 13:26:20 -04001682struct block_device_operations {
Al Virod4430d622008-03-02 09:09:22 -05001683 int (*open) (struct block_device *, fmode_t);
Al Virodb2a1442013-05-05 21:52:57 -04001684 void (*release) (struct gendisk *, fmode_t);
Jens Axboec11f0c02016-08-05 08:11:04 -06001685 int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
Al Virod4430d622008-03-02 09:09:22 -05001686 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1687 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
Dan Williams7a9eb202016-06-03 18:06:47 -07001688 long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
1689 long);
Tejun Heo77ea8872010-12-08 20:57:37 +01001690 unsigned int (*check_events) (struct gendisk *disk,
1691 unsigned int clearing);
1692 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
Al Viro08f85852007-10-08 13:26:20 -04001693 int (*media_changed) (struct gendisk *);
Tejun Heoc3e33e02010-05-15 20:09:29 +02001694 void (*unlock_native_capacity) (struct gendisk *);
Al Viro08f85852007-10-08 13:26:20 -04001695 int (*revalidate_disk) (struct gendisk *);
1696 int (*getgeo)(struct block_device *, struct hd_geometry *);
Nitin Guptab3a27d02010-05-17 11:02:43 +05301697	/* this callback is invoked with swap_lock and sometimes the page table lock held */
1698 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
Al Viro08f85852007-10-08 13:26:20 -04001699 struct module *owner;
Christoph Hellwigbbd3e062015-10-15 14:10:48 +02001700 const struct pr_ops *pr_ops;
Al Viro08f85852007-10-08 13:26:20 -04001701};
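/*
 * Example (illustrative sketch): most drivers only populate a handful
 * of these hooks; mydrv_* are placeholders for the driver's handlers:
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.getgeo		= mydrv_getgeo,
 *	};
 */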
1702
Al Viro633a08b2007-08-29 20:34:12 -04001703extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1704 unsigned long);
Matthew Wilcox47a191f2014-06-04 16:07:46 -07001705extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1706extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1707 struct writeback_control *);
Dan Williamsb2e0d162016-01-15 16:55:59 -08001708extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
Toshi Kani2d96afc2016-05-10 10:23:53 -06001709extern int bdev_dax_supported(struct super_block *, int);
Toshi Kania8078b12016-05-10 10:23:57 -06001710extern bool bdev_dax_capable(struct block_device *);
Mohan Srinivasane2d88782016-12-14 15:55:36 -08001711
1712/*
1713 * X-axis for IO latency histogram support.
1714 */
1715static const u_int64_t latency_x_axis_us[] = {
1716 100,
1717 200,
1718 300,
1719 400,
1720 500,
1721 600,
1722 700,
1723 800,
1724 900,
1725 1000,
1726 1200,
1727 1400,
1728 1600,
1729 1800,
1730 2000,
1731 2500,
1732 3000,
1733 4000,
1734 5000,
1735 6000,
1736 7000,
1737 9000,
1738 10000
1739};
1740
1741#define BLK_IO_LAT_HIST_DISABLE 0
1742#define BLK_IO_LAT_HIST_ENABLE 1
1743#define BLK_IO_LAT_HIST_ZERO 2
1744
1745struct io_latency_state {
1746 u_int64_t latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
1747 u_int64_t latency_reads_elems;
1748 u_int64_t latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
1749 u_int64_t latency_writes_elems;
1750};
1751
1752static inline void
1753blk_update_latency_hist(struct io_latency_state *s,
1754 int read,
1755 u_int64_t delta_us)
1756{
1757 int i;
1758
1759 for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
1760 if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
1761 if (read)
1762 s->latency_y_axis_read[i]++;
1763 else
1764 s->latency_y_axis_write[i]++;
1765 break;
1766 }
1767 }
1768 if (i == ARRAY_SIZE(latency_x_axis_us)) {
1769 /* Overflowed the histogram */
1770 if (read)
1771 s->latency_y_axis_read[i]++;
1772 else
1773 s->latency_y_axis_write[i]++;
1774 }
1775 if (read)
1776 s->latency_reads_elems++;
1777 else
1778 s->latency_writes_elems++;
1779}
1780
1781void blk_zero_latency_hist(struct io_latency_state *s);
1782ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
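/*
 * Example (illustrative sketch): a driver that records a start
 * timestamp per request ('start' below is such a driver-private
 * ktime_t, 'hist' a driver-private io_latency_state) feeds each
 * completion into the histogram:
 *
 *	u_int64_t delta_us = ktime_us_delta(ktime_get(), start);
 *
 *	blk_update_latency_hist(&hist, rq_data_dir(rq) == READ, delta_us);
 *
 * blk_latency_hist_show() then formats the buckets (e.g. for sysfs) and
 * blk_zero_latency_hist() resets them.
 */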
1783
David Howells93614012006-09-30 20:45:40 +02001784#else /* CONFIG_BLOCK */
Fabian Frederickac13a822014-06-04 16:06:27 -07001785
1786struct block_device;
1787
David Howells93614012006-09-30 20:45:40 +02001788/*
1789 * stubs for when the block layer is configured out
1790 */
1791#define buffer_heads_over_limit 0
1792
David Howells93614012006-09-30 20:45:40 +02001793static inline long nr_blockdev_pages(void)
1794{
1795 return 0;
1796}
1797
Jens Axboe1f940bd2011-03-11 20:17:08 +01001798struct blk_plug {
1799};
1800
1801static inline void blk_start_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001802{
1803}
1804
Jens Axboe1f940bd2011-03-11 20:17:08 +01001805static inline void blk_finish_plug(struct blk_plug *plug)
Jens Axboe73c10102011-03-08 13:19:51 +01001806{
1807}
1808
Jens Axboe1f940bd2011-03-11 20:17:08 +01001809static inline void blk_flush_plug(struct task_struct *task)
Jens Axboe73c10102011-03-08 13:19:51 +01001810{
1811}
1812
Jens Axboea237c1c2011-04-16 13:27:55 +02001813static inline void blk_schedule_flush_plug(struct task_struct *task)
1814{
1815}
1816
1817
Jens Axboe73c10102011-03-08 13:19:51 +01001818static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1819{
1820 return false;
1821}
1822
Fabian Frederickac13a822014-06-04 16:06:27 -07001823static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1824 sector_t *error_sector)
1825{
1826 return 0;
1827}
1828
David Howells93614012006-09-30 20:45:40 +02001829#endif /* CONFIG_BLOCK */
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831#endif