From: Weiping Zhang <zhangweiping@didiglobal.com>
Date: Sat, 2 May 2020 15:29:41 +0800
Subject: [PATCH] nvme-pci: align io queue count with allocted nvme_queue in
 nvme_probe
Git-commit: 2a5bcfdd41d68559567cec3c124a75e093506cc1
Patch-mainline: v5.8-rc1
References: git-fixes

Since commit 147b27e4bd08 ("nvme-pci: allocate device queues storage
space at probe"), nvme_alloc_queue does not alloc the nvme queues
itself anymore.

If the write/poll_queues module parameters are changed at runtime to
values larger than the number of allocated queues in nvme_probe,
nvme_alloc_queue will access unallocated memory.

Add a new nr_allocated_queues member to struct nvme_dev to record how
many queues were alloctated in nvme_probe to avoid using more than the
allocated queues after a reset following a change to the
write/poll_queues module parameters.

Also add nr_write_queues and nr_poll_queues members to allow refreshing
the number of write and poll queues based on a change to the module
parameters when resetting the controller.

Fixes: 147b27e4bd08 ("nvme-pci: allocate device queues storage space at probe")
Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
[hch: add nvme_max_io_queues, update the commit message]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Hannes Reinecke <hare@suse.com>
---
 drivers/nvme/host/pci.c | 57 ++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b945e9a89883..b0978ac554d5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -128,6 +128,9 @@ struct nvme_dev {
 	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
+	unsigned int nr_allocated_queues;
+	unsigned int nr_write_queues;
+	unsigned int nr_poll_queues;
 };
 
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
@@ -208,25 +211,14 @@ struct nvme_iod {
 	struct scatterlist *sg;
 };
 
-static unsigned int max_io_queues(void)
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
 {
-	return num_possible_cpus() + write_queues + poll_queues;
-}
-
-static unsigned int max_queue_count(void)
-{
-	/* IO queues + admin queue */
-	return 1 + max_io_queues();
-}
-
-static inline unsigned int nvme_dbbuf_size(u32 stride)
-{
-	return (max_queue_count() * 8 * stride);
+	return dev->nr_allocated_queues * 8 * dev->db_stride;
 }
 
 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs)
 		return 0;
@@ -251,7 +243,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 
 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs) {
 		dma_free_coherent(dev->dev, mem_size,
@@ -1981,7 +1973,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 {
 	struct nvme_dev *dev = affd->priv;
-	unsigned int nr_read_queues;
+	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
 
 	/*
 	 * If there is no interupt available for queues, ensure that
@@ -1997,12 +1989,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 	if (!nrirqs) {
 		nrirqs = 1;
 		nr_read_queues = 0;
-	} else if (nrirqs == 1 || !write_queues) {
+	} else if (nrirqs == 1 || !nr_write_queues) {
 		nr_read_queues = 0;
-	} else if (write_queues >= nrirqs) {
+	} else if (nr_write_queues >= nrirqs) {
 		nr_read_queues = 1;
 	} else {
-		nr_read_queues = nrirqs - write_queues;
+		nr_read_queues = nrirqs - nr_write_queues;
 	}
 
 	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
@@ -2026,7 +2018,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	 * Poll queues don't need interrupts, but we need at least one IO
 	 * queue left over for non-polled IO.
 	 */
-	this_p_queues = poll_queues;
+	this_p_queues = dev->nr_poll_queues;
 	if (this_p_queues >= nr_io_queues) {
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
@@ -2056,14 +2048,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
 }
 
+static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+{
+	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	int result, nr_io_queues;
+	unsigned int nr_io_queues;
 	unsigned long size;
+	int result;
 
-	nr_io_queues = max_io_queues();
+	/*
+	 * Sample the module parameters once at reset time so that we have
+	 * stable values to work with.
+	 */
+	dev->nr_write_queues = write_queues;
+	dev->nr_poll_queues = poll_queues;
 
 	/*
 	 * If tags are shared with admin queue (Apple bug), then
@@ -2071,6 +2074,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 */
 	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
 		nr_io_queues = 1;
+	else
+		nr_io_queues = min(nvme_max_io_queues(dev),
+				   dev->nr_allocated_queues - 1);
 
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
@@ -2745,8 +2751,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!dev)
 		return -ENOMEM;
 
-	dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
-					GFP_KERNEL, node);
+	dev->nr_write_queues = write_queues;
+	dev->nr_poll_queues = poll_queues;
+	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
+	dev->queues = kcalloc_node(dev->nr_allocated_queues,
+			sizeof(struct nvme_queue), GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
 
-- 
2.29.2