/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

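/*
 * Each queue sysfs attribute is described by a queue_sysfs_entry: a plain
 * sysfs attribute plus show/store callbacks that take the owning
 * request_queue directly instead of a raw kobject.
 */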
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

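/*
 * Common helpers for the attribute callbacks below: format a single
 * unsigned long for sysfs output, and parse one back in, rejecting parse
 * errors and values that do not fit in an unsigned int.
 */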
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

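/*
 * Writing nr_requests resizes the request pool, clamped to at least
 * BLKDEV_MIN_RQ. The legacy request_fn path and blk-mq each have their
 * own resize helper; queues with neither (e.g. bio-based drivers) reject
 * the write.
 */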
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

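/*
 * read_ahead_kb is backed by backing_dev_info.ra_pages, which counts
 * PAGE_CACHE_SIZE pages; shifting by (PAGE_CACHE_SHIFT - 10) converts
 * between pages and kilobytes. Illustrative usage (128 is the usual
 * default with 4K pages):
 *
 *	# cat /sys/block/<dev>/queue/read_ahead_kb
 *	128
 */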
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

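/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair for a boolean queue
 * flag. "neg" inverts the exported value, so e.g. "rotational" below is
 * the negation of QUEUE_FLAG_NONROT.
 */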
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

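/*
 * nomerges is a two-bit value: 0 allows all merging, 1 disables only the
 * more expensive extended merge lookups (NOXMERGES), and 2 disables
 * merging entirely (NOMERGES).
 */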
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

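/*
 * rq_affinity: 0 completes a request wherever its interrupt lands, 1
 * steers completions to the submitting CPU's group (SAME_COMP), and 2
 * forces completion on the exact submitting CPU (SAME_FORCE). Only
 * meaningful on SMP builds; writes fail with -EINVAL otherwise.
 */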
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

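/*
 * The attributes published under /sys/block/<dev>/queue/. Entries
 * without a store callback are read-only.
 */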
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

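/*
 * Dispatch sysfs reads and writes to the matching queue_sysfs_entry
 * callback. sysfs_lock serializes attribute access against queue
 * teardown; once the queue is marked dying, accesses fail with -ENOENT.
 */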
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

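/*
 * The queue structure itself is freed via RCU so that lookups still
 * holding an RCU-protected reference across the final put do not see
 * the memory disappear underneath them.
 */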
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	else
		blk_free_flush_queue(q->fq);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

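/**
 * blk_register_queue - register a request queue's sysfs directory
 * @disk: the gendisk whose queue should appear under /sys/block/<disk>/queue
 *
 * Called during disk registration (e.g. from add_disk()). Adds the
 * "queue" kobject, finishes degraded-mode queue initialization, and
 * registers the elevator for request_fn based drivers.
 */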
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		blk_queue_bypass_end(q);
		if (q->mq_ops)
			blk_mq_finish_init(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

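/**
 * blk_unregister_queue - remove a request queue's sysfs directory
 * @disk: the gendisk whose queue is going away
 *
 * Counterpart to blk_register_queue(): tears down the blk-mq and
 * elevator sysfs state and drops the references taken at registration.
 */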
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}