// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Christoph Hellwig.
 */
#include <linux/kobject.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>
#include <linux/module.h>

#include "blk-mq.h"

/**
 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
 * @qmap:	CPU to hardware queue map.
 * @pdev:	PCI device associated with @qmap.
 * @offset:	Offset to use for the pci irq vector
 *
 * This function assumes the PCI device @pdev has at least as many available
 * interrupt vectors as @qmap has queues.  It will then query the vector
 * corresponding to each queue for its affinity mask and build a queue
 * mapping that maps each queue to the CPUs that have irq affinity for the
 * corresponding vector.
 */
int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
			  int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		mask = pci_irq_get_affinity(pdev, queue + offset);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}

	return 0;

fallback:
	/*
	 * A missing affinity mask is only expected in the single-queue
	 * case; warn otherwise and fall back to mapping every CPU to
	 * hardware queue 0.
	 */
	WARN_ON_ONCE(qmap->nr_queues > 1);
	blk_mq_clear_mq_map(qmap);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
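
/*
 * Example usage (illustrative sketch only, not part of this file): a PCI
 * driver typically calls blk_mq_pci_map_queues() from its blk_mq_ops
 * ->map_queues callback.  The "foo" driver below, its foo_dev structure,
 * and the irq vector offset of 0 are hypothetical.
 */
#if 0
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;

	/* Map the default hardware queues; irq vectors start at offset 0. */
	return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				     foo->pdev, 0);
}
#endif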