/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

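/*
 * One entry per file under /sys/block/<disk>/queue: ties a sysfs
 * attribute to optional show/store handlers that operate directly on
 * the owning request_queue.
 */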
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

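/*
 * Raising or lowering nr_requests shifts the congestion thresholds, so
 * re-check both the sync and async request lists afterwards: update the
 * congested and full state, and wake anyone waiting for a request if
 * the queue is no longer full.
 */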
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

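/*
 * Readahead is exposed in kilobytes but stored as a page count in the
 * backing_dev_info; PAGE_CACHE_SHIFT - 10 converts between the two.
 */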
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

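/*
 * max_sectors_kb may only be set between the page size and the hardware
 * limit (max_hw_sectors_kb); kilobyte values are converted to and from
 * 512-byte sectors with a shift by one.
 */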
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

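/*
 * Generate show/store methods for a boolean queue flag.  "neg" inverts
 * the user-visible sense: "rotational" reads 1 when QUEUE_FLAG_NONROT
 * is clear, while "iostats" and "add_random" map directly to their
 * flags.
 */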
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

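/*
 * "nomerges" is a two-bit value: 0 allows all merging, 1 disables only
 * the more expensive extended (x)merge attempts, 2 disables merging
 * entirely.
 */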
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

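/*
 * rq_affinity: when QUEUE_FLAG_SAME_COMP is set, completions are
 * steered back to the CPU group that submitted the request, keeping the
 * data cache-hot.  Only available with the generic SMP helpers.
 */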
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

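/*
 * Everything above ends up under /sys/block/<disk>/queue/.  For
 * example (illustrative shell session, device name assumed):
 *
 *	cat /sys/block/sda/queue/max_sectors_kb
 *	echo 0 > /sys/block/sda/queue/rotational
 *	echo 2 > /sys/block/sda/queue/nomerges
 */
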
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

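/*
 * All attribute access funnels through these two methods, which take
 * sysfs_lock and refuse access once the queue has been marked dead.
 */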
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

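/*
 * Registration order: set up the blktrace sysfs files, add the "queue"
 * kobject under the disk's device, announce it to userspace, then
 * register the elevator (request-based drivers only).  Errors unwind
 * in reverse.
 */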
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

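/*
 * Teardown mirrors blk_register_queue(): drop the elevator first, then
 * the queue kobject, the blktrace files, and finally the device
 * reference taken at registration time.
 */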
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}