#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

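/*
 * Attribute wrappers: each sysfs file carries its show/store callback in one
 * of these entries, recovered via container_of() in the dispatch helpers
 * further below.
 */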
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

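/*
 * The four dispatch helpers below map the kobject back to its blk_mq_ctx or
 * blk_mq_hw_ctx, then run the attribute callback under q->sysfs_lock. They
 * return -EIO if the attribute has no callback and -ENOENT once the queue
 * has been marked dying.
 */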
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}

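/*
 * Print the requests on @list, one "\t<pointer>\n" line per entry,
 * truncating with "\t...\n" once the output would no longer fit in a
 * single page.
 */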
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
		       hctx->poll_considered, hctx->poll_invoked,
		       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

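/*
 * Dump the dispatch-batch histogram: the first column is the bucket label
 * (0, then powers of two), the second the number of queue runs that fell
 * into that bucket.
 */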
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	&blk_mq_hw_sysfs_poll.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

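/*
 * Register/unregister the kobject tree for one hardware queue: the per-hctx
 * directory under the queue's "mq" kobject plus one "cpuN" entry per software
 * context mapped to it. Hardware queues with no mapped contexts are skipped.
 */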
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

static void __blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_disk(struct gendisk *disk)
{
	blk_mq_disable_hotplug();
	__blk_mq_unregister_disk(disk);
	blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

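/*
 * Create the "mq" directory under the disk's device kobject and register
 * every hardware queue beneath it. CPU hotplug is disabled across the walk
 * so the ctx<->hctx mapping cannot change while the kobjects are added; on
 * failure, everything registered so far is torn down again.
 */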
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		__blk_mq_unregister_disk(disk);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

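/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() drop and re-add the
 * per-hctx directories when the hardware queue layout is rebuilt (e.g. on
 * queue remap); the mq_sysfs_init_done flag keeps them from running before
 * blk_mq_register_disk() has set up the parent kobject.
 */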
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}