/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

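/*
 * Spread queue indexes evenly: index 'cpu' (a running counter over the
 * CPUs being mapped, not necessarily a raw CPU id) out of nr_cpus gets
 * queue cpu * nr_queues / nr_cpus. For example, with nr_cpus = 8 and
 * nr_queues = 3, counter values 0-2 map to queue 0, 3-5 to queue 1,
 * and 6-7 to queue 2.
 */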
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

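/*
 * Return the first CPU in @cpu's thread sibling mask, so that all
 * hyperthreads of one core resolve to the same representative CPU. If
 * the topology isn't (yet) populated, cpumask_first() returns a value
 * >= nr_cpu_ids and we fall back to @cpu itself.
 */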
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

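/*
 * Build the CPU -> hardware queue map: map[cpu] is the queue index that
 * requests submitted on that CPU are steered to. When there are fewer
 * queues than CPUs and hyperthreading is present, sibling threads share
 * a queue; e.g. 4 cores x 2 threads with 4 queues gives each physical
 * core its own queue. Returns 0 on success, 1 if the temporary cpumask
 * cannot be allocated.
 */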
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
			    const struct cpumask *online_mask)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
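	/*
	 * First pass: count the online CPUs and how many distinct
	 * first siblings (i.e. physical cores) they span.
	 */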
	for_each_cpu(i, online_mask) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

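	/*
	 * Second pass: assign a queue to every possible CPU. Offline
	 * CPUs fall back to queue 0; 'queue' counts only the online
	 * CPUs (or cores) assigned so far.
	 */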
	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpumask_test_cpu(i, online_mask)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case - we have equal or more hardware queues than
		 * CPUs, or there are no thread siblings to take into
		 * account: do a 1:1 mapping if there are enough queues,
		 * otherwise spread the CPUs evenly across the queues.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}
71 /*
72 * Less then nr_cpus queues, and we have some number of
73 * threads per cores. Map sibling threads to the same
74 * queue.
75 */
76 first_sibling = get_first_sibling(i);
77 if (first_sibling == i) {
78 map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
79 queue);
80 queue++;
81 } else
82 map[i] = map[first_sibling];
83 }
84
Jens Axboe320ae512013-10-24 09:20:05 +010085 free_cpumask_var(cpus);
86 return 0;
87}
88
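/*
 * Allocate and populate a fresh queue map for a tag set. The map has
 * one entry per possible CPU and lives on the set's home NUMA node.
 * Returns the map, or NULL on allocation failure.
 */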
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
	unsigned int *map;

	/* If cpus are offline, map them to first hctx */
	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
			   set->numa_node);
	if (!map)
		return NULL;

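	/* blk_mq_update_queue_map() returns 0 on success, non-zero on failure */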
	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
		return map;

	kfree(map);
	return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important. Returns the (local
 * memory) NUMA node of the first CPU mapped to hw queue @index, or
 * NUMA_NO_NODE if no CPU maps to it.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}