blob: e233996bb76f2f94d1cef4249132144e379e90cd [file] [log] [blame]
/*
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
Stephen Rothwell8ec2ef22016-09-19 15:50:16 +100013#include <linux/kobject.h>
14#include <linux/blkdev.h>
Christoph Hellwig973c4e32016-09-14 16:18:56 +020015#include <linux/blk-mq.h>
16#include <linux/blk-mq-pci.h>
17#include <linux/pci.h>
18#include <linux/module.h>
19
20/**
21 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
22 * @set: tagset to provide the mapping for
23 * @pdev: PCI device associated with @set.
Keith Buschf23f5bec2018-03-27 09:39:06 -060024 * @offset: Offset to use for the pci irq vector
Christoph Hellwig973c4e32016-09-14 16:18:56 +020025 *
26 * This function assumes the PCI device @pdev has at least as many available
Sagi Grimberg018c2592017-03-29 20:04:36 +030027 * interrupt vectors as @set has queues. It will then query the vector
Christoph Hellwig973c4e32016-09-14 16:18:56 +020028 * corresponding to each queue for it's affinity mask and built queue mapping
29 * that maps a queue to the CPUs that have irq affinity for the corresponding
30 * vector.
31 */
Keith Buschf23f5bec2018-03-27 09:39:06 -060032int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
33 int offset)
Christoph Hellwig973c4e32016-09-14 16:18:56 +020034{
35 const struct cpumask *mask;
36 unsigned int queue, cpu;
37
38 for (queue = 0; queue < set->nr_hw_queues; queue++) {
Keith Buschf23f5bec2018-03-27 09:39:06 -060039 mask = pci_irq_get_affinity(pdev, queue + offset);
Christoph Hellwig973c4e32016-09-14 16:18:56 +020040 if (!mask)
Christoph Hellwigc0053902017-08-17 12:24:47 +020041 goto fallback;
Christoph Hellwig973c4e32016-09-14 16:18:56 +020042
43 for_each_cpu(cpu, mask)
44 set->mq_map[cpu] = queue;
45 }
46
47 return 0;
Christoph Hellwigc0053902017-08-17 12:24:47 +020048
49fallback:
50 WARN_ON_ONCE(set->nr_hw_queues > 1);
51 for_each_possible_cpu(cpu)
52 set->mq_map[cpu] = 0;
53 return 0;
Christoph Hellwig973c4e32016-09-14 16:18:56 +020054}
55EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);