block/blk-mq-rdma.c +4 −4

@@ -29,24 +29,24 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < map->nr_queues; queue++) {
 		mask = ib_get_vector_affinity(dev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->map[0].mq_map[cpu] = queue;
+			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(&set->map[0]);
+	return blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
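The behavioral change is confined to the loop: it now walks a single blk_mq_queue_map rather than the whole tag set, and offsets each hardware-queue index by map->queue_offset before storing it in mq_map. A minimal userspace model of that loop, where the per-vector CPU affinity is a made-up assumption standing in for ib_get_vector_affinity():

/*
 * Illustrative model only, not kernel code: each of 4 vectors is
 * assumed affine to two consecutive CPUs, so CPUs 2q and 2q+1 map
 * to hardware queue (queue_offset + q), as in the patched loop.
 */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	unsigned int mq_map[NR_CPUS];
	unsigned int nr_queues = 4, queue_offset = 2;	/* assumed values */

	for (unsigned int queue = 0; queue < nr_queues; queue++)
		for (unsigned int cpu = 2 * queue; cpu < 2 * queue + 2; cpu++)
			mq_map[cpu] = queue_offset + queue;	/* as in the patch */

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hctx %u\n", cpu, mq_map[cpu]);
	return 0;
}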
drivers/nvme/host/rdma.c +1 −1

@@ -1751,7 +1751,7 @@
 static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
-	return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
+	return blk_mq_rdma_map_queues(&set->map[0], ctrl->device->dev, 0);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
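nvme-rdma still has a single queue map, so it simply passes &set->map[0]. What the new signature enables is per-map mapping in a driver that splits its hardware queues across several maps. A hedged sketch of such a hypothetical caller (my_ctrl, ib_dev, and the vector split are illustrative assumptions, not part of this patch):

/* Hypothetical multi-map caller; names and vector layout are assumed. */
static int my_driver_map_queues(struct blk_mq_tag_set *set)
{
	struct my_ctrl *ctrl = set->driver_data;
	int ret;

	/* First map takes the vectors starting at 0 ... */
	ret = blk_mq_rdma_map_queues(&set->map[0], ctrl->ib_dev, 0);
	if (ret)
		return ret;

	/*
	 * ... second map uses the vectors after it; its queue_offset
	 * keeps the resulting hctx indices from colliding.
	 */
	return blk_mq_rdma_map_queues(&set->map[1], ctrl->ib_dev,
				      set->map[0].nr_queues);
}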
include/linux/blk-mq-rdma.h +1 −1

@@ -4,7 +4,7 @@
 struct blk_mq_tag_set;
 struct ib_device;
 
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_RDMA_H */