blk-mq: abstract out queue map
This is in preparation for allowing multiple queue maps per
tag set, if so desired.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
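
The core of the change is the new struct blk_mq_queue_map: a per-CPU table
of hardware queue indices plus the number of queues that map covers. What
follows is a minimal, self-contained userspace sketch (not kernel code and
not part of the patch) of that idea; the names mirror the kernel ones, but
plain round-robin stands in for the sibling-aware spreading done by the
real blk_mq_map_queues(), and the linear reverse lookup mirrors
blk_mq_hw_queue_to_node().

/*
 * Illustrative userspace sketch only.  A queue map is a per-CPU table
 * naming the hw queue each CPU dispatches to, plus the number of queues
 * covered by that map.
 */
#include <stdio.h>

#define NR_CPUS 8

struct queue_map {
	unsigned int mq_map[NR_CPUS];	/* CPU -> hw queue index */
	unsigned int nr_queues;
};

/* Default mapping: spread the CPUs across the available queues. */
static void map_queues(struct queue_map *qmap)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		qmap->mq_map[cpu] = cpu % qmap->nr_queues;
}

/*
 * Reverse lookup in the spirit of blk_mq_hw_queue_to_node(): a linear
 * scan is acceptable because it only runs at queue init time.
 */
static int queue_to_first_cpu(const struct queue_map *qmap, unsigned int index)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (qmap->mq_map[cpu] == index)
			return cpu;
	return -1;
}

int main(void)
{
	struct queue_map qmap = { .nr_queues = 4 };

	map_queues(&qmap);
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hw queue %u\n", cpu, qmap.mq_map[cpu]);
	printf("first cpu mapped to queue 2: %d\n",
	       queue_to_first_cpu(&qmap, 2));
	return 0;
}

In the patch below, blk_mq_map_queues(), blk_mq_pci_map_queues(),
blk_mq_virtio_map_queues() and blk_mq_clear_mq_map() all switch from taking
the whole blk_mq_tag_set to taking one such map, and blk_mq_tag_set grows a
map[HCTX_MAX_TYPES] array (HCTX_MAX_TYPES is 1 for now), which is what makes
additional maps possible later.
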
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 3eb169f..6e6686c 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -30,10 +30,10 @@
return cpu;
}
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
- unsigned int *map = set->mq_map;
- unsigned int nr_queues = set->nr_hw_queues;
+ unsigned int *map = qmap->mq_map;
+ unsigned int nr_queues = qmap->nr_queues;
unsigned int cpu, first_sibling;
for_each_possible_cpu(cpu) {
@@ -62,12 +62,12 @@
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
int i;
for_each_possible_cpu(i) {
- if (index == mq_map[i])
+ if (index == qmap->mq_map[i])
return local_memory_node(cpu_to_node(i));
}
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index db644ec..40333d60 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -31,26 +31,26 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset)
{
const struct cpumask *mask;
unsigned int queue, cpu;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue + offset);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = queue;
}
return 0;
fallback:
- WARN_ON_ONCE(set->nr_hw_queues > 1);
- blk_mq_clear_mq_map(set);
+ WARN_ON_ONCE(qmap->nr_queues > 1);
+ blk_mq_clear_mq_map(qmap);
return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 996167f..a71576a 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -41,12 +41,12 @@
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ set->map[0].mq_map[cpu] = queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(&set->map[0]);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index c3afbca..661fbfe 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -29,7 +29,7 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec)
{
const struct cpumask *mask;
@@ -38,17 +38,17 @@
if (!vdev->config->get_vq_affinity)
goto fallback;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 766facf..fac88d1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1975,7 +1975,7 @@
struct blk_mq_tags *tags;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2031,7 +2031,7 @@
size_t rq_size, left;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2322,7 +2322,7 @@
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = set->mq_map[i];
+ hctx_idx = set->map[0].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
- set->mq_map[i] = 0;
+ set->map[0].mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@
int node;
struct blk_mq_hw_ctx *hctx;
- node = blk_mq_hw_queue_to_node(set->mq_map, i);
+ node = blk_mq_hw_queue_to_node(&set->map[0], i);
/*
* If the hw queue has been mapped to another numa node,
* we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@
* for (queue = 0; queue < set->nr_hw_queues; queue++) {
* mask = get_cpu_mask(queue)
* for_each_cpu(cpu, mask)
- * set->mq_map[cpu] = queue;
+ * set->map.mq_map[cpu] = queue;
* }
*
* When we need to remap, the table has to be cleared for
* killing stale mapping since one CPU may not be mapped
* to any hw queue.
*/
- blk_mq_clear_mq_map(set);
+ blk_mq_clear_mq_map(&set->map[0]);
return set->ops->map_queues(set);
} else
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(&set->map[0]);
}
/*
@@ -2857,10 +2857,12 @@
return -ENOMEM;
ret = -ENOMEM;
- set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
- GFP_KERNEL, set->numa_node);
- if (!set->mq_map)
+ set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+ sizeof(*set->map[0].mq_map),
+ GFP_KERNEL, set->numa_node);
+ if (!set->map[0].mq_map)
goto out_free_tags;
+ set->map[0].nr_queues = set->nr_hw_queues;
ret = blk_mq_update_queue_map(set);
if (ret)
@@ -2876,8 +2878,8 @@
return 0;
out_free_mq_map:
- kfree(set->mq_map);
- set->mq_map = NULL;
+ kfree(set->map[0].mq_map);
+ set->map[0].mq_map = NULL;
out_free_tags:
kfree(set->tags);
set->tags = NULL;
@@ -2892,8 +2894,8 @@
for (i = 0; i < nr_cpu_ids; i++)
blk_mq_free_map_and_requests(set, i);
- kfree(set->mq_map);
- set->mq_map = NULL;
+ kfree(set->map[0].mq_map);
+ set->map[0].mq_map = NULL;
kfree(set->tags);
set->tags = NULL;
@@ -3054,7 +3056,7 @@
pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
nr_hw_queues, prev_nr_hw_queues);
set->nr_hw_queues = prev_nr_hw_queues;
- blk_mq_map_queues(set);
+ blk_mq_map_queues(&set->map[0]);
goto fallback;
}
blk_mq_map_swqueue(q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9536be0..889f006 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,14 +70,14 @@
/*
* CPU -> queue mappings
*/
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
int cpu)
{
struct blk_mq_tag_set *set = q->tag_set;
- return q->queue_hw_ctx[set->mq_map[cpu]];
+ return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
}
/*
@@ -206,12 +206,12 @@
__blk_mq_put_driver_tag(hctx, rq);
}
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
int cpu;
for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ qmap->mq_map[cpu] = 0;
}
#endif
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 086c6bb..6e869d0 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -624,7 +624,7 @@
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c33bb20..49ad854 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -435,7 +435,7 @@
{
struct nvme_dev *dev = set->driver_data;
- return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+ return blk_mq_pci_map_queues(&set->map[0], to_pci_dev(dev->dev),
dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 29dfd1b..fdf3e52e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6934,11 +6934,12 @@
{
int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
if (USER_CTRL_IRQ(vha->hw))
- rc = blk_mq_map_queues(&shost->tag_set);
+ rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
return rc;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 651be30..ed81b8e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1812,7 +1812,7 @@
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(&set->map[0]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a25a07a..bac0842 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5319,7 +5319,8 @@
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+ return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+ ctrl_info->pci_dev, 0);
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1c72db9..c3c95b3 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -719,8 +719,9 @@
static int virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
- return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+ return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
/*
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 9f4c17f..0b1f45c 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 69b4da26..687ae28 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d83a26f..1761648 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -74,10 +74,19 @@
struct srcu_struct srcu[0];
};
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+};
+
+enum {
+ HCTX_MAX_TYPES = 1,
+};
+
struct blk_mq_tag_set {
- unsigned int *mq_map;
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues;
+ unsigned int nr_hw_queues; /* nr hw queues across maps */
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
@@ -295,7 +304,7 @@
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
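
For driver authors, the visible change is that the mapping helpers now take
a single queue map rather than the tag set. A rough sketch of a PCI driver's
.map_queues callback after this patch, patterned on the nvme change above
(my_dev and its pdev field are illustrative, not from the patch):

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>

struct my_dev {
	struct pci_dev *pdev;	/* illustrative driver state */
};

static int my_driver_map_queues(struct blk_mq_tag_set *set)
{
	struct my_dev *dev = set->driver_data;

	/*
	 * Fill set->map[0].mq_map from the IRQ affinity of the device's
	 * vectors (vector offset 0 here); on failure the helper clears
	 * the map itself.
	 */
	return blk_mq_pci_map_queues(&set->map[0], dev->pdev, 0);
}
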