blob: db644ec624f501ee63f7d90d35412e0dbabf603a [file] [log] [blame]
Christoph Hellwig973c4e32016-09-14 16:18:56 +02001/*
2 * Copyright (c) 2016 Christoph Hellwig.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
Stephen Rothwell8ec2ef22016-09-19 15:50:16 +100013#include <linux/kobject.h>
14#include <linux/blkdev.h>
Christoph Hellwig973c4e32016-09-14 16:18:56 +020015#include <linux/blk-mq.h>
16#include <linux/blk-mq-pci.h>
17#include <linux/pci.h>
18#include <linux/module.h>
19
Minwoo Im0da73d02018-07-02 23:46:43 +090020#include "blk-mq.h"
21
/**
 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
 * @set: tagset to provide the mapping for
 * @pdev: PCI device associated with @set.
 * @offset: Offset to use for the pci irq vector
 *
 * This function assumes the PCI device @pdev has at least as many available
 * interrupt vectors as @set has queues. It will then query the vector
 * corresponding to each queue for its affinity mask and build a queue mapping
 * that maps each queue to the CPUs that have irq affinity for the
 * corresponding vector.
 */
Keith Buschf23f5bec2018-03-27 09:39:06 -060034int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
35 int offset)
Christoph Hellwig973c4e32016-09-14 16:18:56 +020036{
37 const struct cpumask *mask;
38 unsigned int queue, cpu;
39
40 for (queue = 0; queue < set->nr_hw_queues; queue++) {
Keith Buschf23f5bec2018-03-27 09:39:06 -060041 mask = pci_irq_get_affinity(pdev, queue + offset);
Christoph Hellwig973c4e32016-09-14 16:18:56 +020042 if (!mask)
Christoph Hellwigc0053902017-08-17 12:24:47 +020043 goto fallback;
Christoph Hellwig973c4e32016-09-14 16:18:56 +020044
45 for_each_cpu(cpu, mask)
46 set->mq_map[cpu] = queue;
47 }
48
49 return 0;
Christoph Hellwigc0053902017-08-17 12:24:47 +020050
51fallback:
52 WARN_ON_ONCE(set->nr_hw_queues > 1);
Minwoo Im0da73d02018-07-02 23:46:43 +090053 blk_mq_clear_mq_map(set);
Christoph Hellwigc0053902017-08-17 12:24:47 +020054 return 0;
Christoph Hellwig973c4e32016-09-14 16:18:56 +020055}
56EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);