blob: 996167f1de18dbd16696520cfdad96fc12500ae5 [file] [log] [blame]
/*
 * Copyright (c) 2017 Sagi Grimberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
13#include <linux/blk-mq.h>
14#include <linux/blk-mq-rdma.h>
15#include <rdma/ib_verbs.h>
16
17/**
18 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
19 * @set: tagset to provide the mapping for
20 * @dev: rdma device associated with @set.
21 * @first_vec: first interrupt vectors to use for queues (usually 0)
22 *
23 * This function assumes the rdma device @dev has at least as many available
24 * interrupt vetors as @set has queues. It will then query it's affinity mask
25 * and built queue mapping that maps a queue to the CPUs that have irq affinity
26 * for the corresponding vector.
27 *
28 * In case either the driver passed a @dev with less vectors than
29 * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
30 * vector, we fallback to the naive mapping.
31 */
32int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
33 struct ib_device *dev, int first_vec)
34{
35 const struct cpumask *mask;
36 unsigned int queue, cpu;
37
38 for (queue = 0; queue < set->nr_hw_queues; queue++) {
39 mask = ib_get_vector_affinity(dev, first_vec + queue);
40 if (!mask)
41 goto fallback;
42
43 for_each_cpu(cpu, mask)
44 set->mq_map[cpu] = queue;
45 }
46
47 return 0;
48
49fallback:
50 return blk_mq_map_queues(set);
51}
52EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);