| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (c) 2016 Christoph Hellwig. |
| 4 | */ |
| 5 | #include <linux/kobject.h> |
| 6 | #include <linux/blkdev.h> |
| 7 | #include <linux/blk-mq-pci.h> |
| 8 | #include <linux/pci.h> |
| 9 | #include <linux/module.h> |
| 10 | |
| 11 | #include "blk-mq.h" |
| 12 | |
| 13 | /** |
| 14 | * blk_mq_pci_map_queues - provide a default queue mapping for PCI device |
| 15 | * @qmap: CPU to hardware queue map. |
 * @pdev: PCI device associated with @qmap.
| 17 | * @offset: Offset to use for the pci irq vector |
| 18 | * |
| 19 | * This function assumes the PCI device @pdev has at least as many available |
 * interrupt vectors as @qmap has queues. It will then query the vector
 * corresponding to each queue for its affinity mask and build a queue mapping
 * that maps each queue to the CPUs that have irq affinity for the corresponding
 * vector.
| 24 | */ |
| 25 | void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, |
| 26 | int offset) |
| 27 | { |
| 28 | const struct cpumask *mask; |
| 29 | unsigned int queue, cpu; |
| 30 | |
| 31 | for (queue = 0; queue < qmap->nr_queues; queue++) { |
| 32 | mask = pci_irq_get_affinity(pdev, queue + offset); |
| 33 | if (!mask) |
| 34 | goto fallback; |
| 35 | |
| 36 | for_each_cpu(cpu, mask) |
| 37 | qmap->mq_map[cpu] = qmap->queue_offset + queue; |
| 38 | } |
| 39 | |
| 40 | return; |
| 41 | |
| 42 | fallback: |
| 43 | WARN_ON_ONCE(qmap->nr_queues > 1); |
| 44 | blk_mq_clear_mq_map(qmap); |
| 45 | } |
| 46 | EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues); |