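/*
 * CPU <-> hardware queue mapping helpers for blk-mq.
 */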
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

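/*
 * Group CPUs onto queues using ceiling division: for example, with
 * nr_cpus = 8 and nr_queues = 3 the group size is (8 + 3 - 1) / 3 = 3,
 * so CPUs 0-2 map to queue 0, CPUs 3-5 to queue 1 and CPUs 6-7 to
 * queue 2.
 */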
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
}

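/*
 * Return the first CPU in @cpu's thread sibling mask, so that all
 * hyperthread siblings of a core resolve to the same CPU number.
 * Fall back to @cpu itself if the mask is empty.
 */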
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_thread_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

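/*
 * Build the CPU -> queue map. The first pass counts online CPUs and the
 * number of distinct first siblings (i.e. physical cores). The second
 * pass assigns a queue index to each possible CPU: offline CPUs go to
 * queue 0, and when there are fewer queues than CPUs, thread siblings
 * share a queue.
 */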
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_online_cpu(i) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpu_online(i)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case: we have as many or more hardware queues than
		 * CPUs, or there are no thread siblings to account for.
		 * Do a 1:1 mapping if possible, otherwise map sequentially.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}

		/*
		 * Fewer than nr_cpus queues, and we have some number of
		 * threads per core. Map sibling threads to the same
		 * queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
						    queue);
			queue++;
		} else
			map[i] = map[first_sibling];
	}

	free_cpumask_var(cpus);
	return 0;
}

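/*
 * Allocate and fill in a CPU -> hardware queue map for the given tag
 * set. Returns the map on success, or NULL on allocation or mapping
 * failure.
 */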
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
	unsigned int *map;

	/* If CPUs are offline, map them to the first hctx */
	map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
			   set->numa_node);
	if (!map)
		return NULL;

	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
		return map;

	kfree(map);
	return NULL;
}

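/*
 * Usage sketch (a hypothetical caller, for illustration only; the field
 * values below are assumptions, not taken from this file):
 *
 *	unsigned int *mq_map;
 *
 *	set->nr_hw_queues = 2;
 *	set->numa_node = NUMA_NO_NODE;
 *	mq_map = blk_mq_make_queue_map(set);
 *	if (!mq_map)
 *		return -ENOMEM;
 */
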
/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}