/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

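/*
 * Sketch of the parse helper above: queue_var_store() accepts a decimal
 * string such as "128\n" and rejects anything above UINT_MAX with
 * -EINVAL, so callers can safely store the result in a 32-bit limit.
 */
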
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

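/*
 * Example (device name illustrative):
 *	echo 256 > /sys/block/sda/queue/nr_requests
 * resizes the request pool for legacy and blk-mq queues alike; values
 * below BLKDEV_MIN_RQ are silently raised to that floor.
 */
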
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

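/*
 * read_ahead_kb is stored internally as a page count: with 4 KB pages
 * (PAGE_SHIFT == 12), writing "512" above leaves ra_pages at
 * 512 >> 2 == 128.
 */
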
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

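/*
 * discard_max_bytes takes a byte count that must be aligned to
 * discard_granularity (the bitmask test assumes a power-of-two
 * granularity) and is silently clamped to the read-only
 * discard_max_hw_bytes limit reported by the device.
 */
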
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

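/*
 * max_sectors_kb is only accepted between the page size and the smaller
 * of max_hw_sectors_kb and the per-device limit; io_pages on the
 * backing_dev_info is kept in sync so readahead does not build I/Os
 * larger than the queue will accept.
 */
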
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
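
/*
 * Each QUEUE_SYSFS_BIT_FNS() line above expands to a show/store pair,
 * e.g. queue_show_nonrot()/queue_store_nonrot(). A non-zero "neg"
 * inverts the flag, which is why "rotational" reads 1 exactly when
 * QUEUE_FLAG_NONROT is clear.
 */
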
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

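/*
 * nomerges: 0 enables all merging, 1 (NOXMERGES) skips the more
 * expensive merge lookups while keeping one-hit cache merges, and 2
 * (NOMERGES) disables merging entirely.
 */
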
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

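/*
 * rq_affinity: 0 completes requests wherever the interrupt lands, 1
 * steers completions to the submitting CPU's group (SAME_COMP), and 2
 * forces them onto the exact submitting CPU (SAME_FORCE as well).
 */
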
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

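/*
 * io_poll_delay: -1 selects classic busy polling, 0 lets the kernel
 * pick a hybrid-poll sleep time per I/O, and a positive value is a
 * fixed hybrid-poll sleep time in microseconds (poll_nsec holds it in
 * nanoseconds).
 */
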
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;

		rwb = q->rq_wb;
		if (!rwb)
			return -EINVAL;
	}

	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

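/*
 * wbt_lat_usec is a write latency target in microseconds: -1 restores
 * the device-type default from wbt_default_latency_nsec(), and a value
 * of 0 effectively disables writeback throttling, since a zero target
 * never throttles.
 */
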
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

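/*
 * write_cache accepts "write back", "write through" or "none"; it only
 * flips QUEUE_FLAG_WC, i.e. whether the block layer forwards flushes,
 * and does not reprogram the device's volatile cache itself.
 */
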
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

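/*
 * Every attribute in default_attrs[] becomes a file under
 * /sys/block/<disk>/queue/ once blk_register_queue() below adds the
 * "queue" kobject, e.g. /sys/block/sda/queue/scheduler.
 */
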
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

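/*
 * Both wrappers serialize on q->sysfs_lock, so individual ->show and
 * ->store handlers never race with each other or with queue
 * (un)registration; a dying queue reports -ENOENT instead.
 */
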
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}

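/*
 * Note the ordering above: the "queue" kobject must exist before the
 * blk-mq and elevator attributes are added beneath it, and an elevator
 * is registered only where one can exist (legacy request_fn queues, or
 * blk-mq queues with a scheduler attached).
 */
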
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	mutex_lock(&q->sysfs_lock);
	queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	wbt_exit(q);

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}