/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

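/*
 * Spread CPU indices evenly across the available queues. For example,
 * with nr_cpus == 8 and nr_queues == 3, CPUs 0-2 map to queue 0,
 * CPUs 3-5 to queue 1 and CPUs 6-7 to queue 2.
 */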
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
                              const int cpu)
{
        return cpu * nr_queues / nr_cpus;
}

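/*
 * Return the first thread sibling of @cpu as reported by the topology,
 * falling back to @cpu itself if the sibling mask yields no valid CPU.
 */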
static int get_first_sibling(unsigned int cpu)
{
        unsigned int ret;

        ret = cpumask_first(topology_thread_cpumask(cpu));
        if (ret < nr_cpu_ids)
                return ret;

        return cpu;
}

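/*
 * Fill @map, indexed by possible CPU, with a hardware queue index in
 * [0, nr_queues) for each online CPU; offline CPUs map to queue 0.
 * Returns 0 on success, 1 if the temporary cpumask allocation fails.
 */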
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
{
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;

        if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
                return 1;

        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
        for_each_online_cpu(i) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
                        nr_uniq_cpus++;
                cpumask_set_cpu(i, cpus);
        }

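        /*
         * nr_cpus now counts the online CPUs and nr_uniq_cpus the
         * distinct first siblings among them, e.g. 8 and 4 on an 8-CPU
         * machine with two-way SMT; the two are equal without SMT.
         */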
        queue = 0;
        for_each_possible_cpu(i) {
                if (!cpu_online(i)) {
                        map[i] = 0;
                        continue;
                }

                /*
                 * Easy case - we have equal or more hardware queues
                 * than CPUs, or there are no thread siblings to take
                 * into account. Do a 1:1 mapping if there are enough
                 * queues, or a sequential mapping otherwise.
                 */
                if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
                        map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
                        queue++;
                        continue;
                }

                /*
                 * Less than nr_cpus queues, and some number of threads
                 * per core. Map sibling threads to the same queue.
                 */
                first_sibling = get_first_sibling(i);
                if (first_sibling == i) {
                        map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
                                                    queue);
                        queue++;
                } else {
                        map[i] = map[first_sibling];
                }
        }

        free_cpumask_var(cpus);
        return 0;
}

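/*
 * Allocate the CPU-to-queue map for @set on its home NUMA node and
 * populate it; returns NULL if the allocation or the mapping fails.
 */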
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
        unsigned int *map;

        /* If cpus are offline, map them to first hctx */
        map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
                           set->numa_node);
        if (!map)
                return NULL;

        if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
                return map;

        kfree(map);
        return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == mq_map[i])
                        return cpu_to_node(i);
        }

        return NUMA_NO_NODE;
}
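
/*
 * Illustrative only: assuming a four-CPU, non-SMT, single-node machine
 * with all CPUs online and nr_hw_queues == 2, blk_mq_make_queue_map()
 * produces map = { 0, 0, 1, 1 }, and blk_mq_hw_queue_to_node(map, 1)
 * returns the node of CPU 2.
 */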