/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

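/*
 * Each file under /sys/block/<dev>/queue/ is described by one of these
 * entries: a sysfs attribute plus optional show/store callbacks that
 * operate directly on the owning request_queue.
 */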
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

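/*
 * Shared helpers: queue_var_show() prints a single unsigned long and
 * queue_var_store() parses one back from the user-supplied buffer.
 */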
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

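/*
 * Changing nr_requests resizes the request allocation limit, so the
 * congestion and "queue full" state must be re-evaluated for both the
 * sync and async request lists under the queue lock.
 */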
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

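/*
 * Read-only reporting of the queue limits. Sector counts are converted
 * to KB with ">> 1" and to bytes with "<< 9" where the sysfs file name
 * calls for it.
 */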
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

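/*
 * max_sectors_kb may be lowered by the user, but never below one page
 * or above the hardware limit reported by the driver.
 */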
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
			max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

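/*
 * The sysfs file is called "rotational", so its value is the inverse of
 * the internal QUEUE_FLAG_NONROT flag: writing 0 marks the device as
 * non-rotational (e.g. an SSD).
 */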
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nomerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

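/*
 * rq_affinity controls whether request completions are steered back to
 * the CPU that submitted the request (QUEUE_FLAG_SAME_COMP). The store
 * side is only available when the generic SMP helpers are built in.
 */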
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

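/*
 * One queue_sysfs_entry per file exposed under /sys/block/<dev>/queue/.
 * Writable attributes are S_IRUGO | S_IWUSR, read-only ones S_IRUGO.
 */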
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_iostats_show,
	.store = queue_iostats_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

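/*
 * Generic show/store dispatchers: look up the queue_sysfs_entry for the
 * attribute, take q->sysfs_lock, and refuse access once the queue has
 * been marked QUEUE_FLAG_DEAD.
 */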
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the kobject release method for the queue; it is
 *     invoked once the last reference to the queue has been dropped,
 *     typically when a block device is being de-registered. Currently, its
 *     primary task is to free all the &struct request structures that were
 *     allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

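/*
 * blk_register_queue() exposes the "queue" directory in sysfs for @disk
 * and, for request-based drivers (q->request_fn set), registers the
 * elevator attributes as well.
 */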
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		return ret;
	}

	return 0;
}

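/*
 * blk_unregister_queue() undoes blk_register_queue(): the elevator
 * attributes (if any), the "queue" kobject and the blktrace sysfs files
 * are removed, and the device reference taken at registration time is
 * dropped.
 */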
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}