From: Shay Drory <shayd@nvidia.com>
Date: Sun, 14 Nov 2021 13:01:21 +0200
Subject: net/mlx5: Introduce control IRQ request API
Patch-mainline: v5.17-rc1
Git-commit: 5256a46bf53872d9888c0d9c34382262b3006599
References: jsc#PED-1549

Currently, the IRQ layer has a separate flow for ctrl and comp IRQs, and
the distinction between ctrl and comp IRQs is made inside the IRQ layer.

In order to ease the coding and maintenance of the IRQ layer,
introduce a new API for requesting control IRQs -
mlx5_ctrl_irq_request(struct mlx5_core_dev *dev).
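
For illustration, a minimal sketch of how a call site is expected to look
with the new helper (this mirrors the eq.c hunk below; MLX5_IRQ_EQ_CTRL and
the surrounding identifiers come from that hunk, nothing new is introduced
here):

	/* Control EQs no longer pass a magic vector index to
	 * mlx5_irq_request(); they use the dedicated ctrl helper and let
	 * the IRQ layer pick the pool and index internally.
	 */
	if (vecidx == MLX5_IRQ_EQ_CTRL)
		eq->irq = mlx5_ctrl_irq_request(dev);
	else
		eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);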

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/eq.c       |    5 +
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h |    1 
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c  |   62 +++++++++++++++++----
 3 files changed, 58 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -289,7 +289,10 @@ create_map_eq(struct mlx5_core_dev *dev,
 	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
 	init_eq_buf(eq);
 
-	eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
+	if (vecidx == MLX5_IRQ_EQ_CTRL)
+		eq->irq = mlx5_ctrl_irq_request(dev);
+	else
+		eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
 	if (IS_ERR(eq->irq)) {
 		err = PTR_ERR(eq->irq);
 		goto err_buf;
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -22,6 +22,7 @@ int mlx5_set_msix_vec_count(struct mlx5_
 			    int msix_vec_count);
 int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 				  struct cpumask *affinity);
 void mlx5_irq_release(struct mlx5_irq *irq);
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -396,23 +396,36 @@ irq_pool_request_vector(struct mlx5_irq_
 	if (IS_ERR(irq) || !affinity)
 		goto unlock;
 	cpumask_copy(irq->mask, affinity);
-	if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
-	    cpumask_empty(irq->mask))
-		cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
 unlock:
 	mutex_unlock(&pool->lock);
 	return irq;
 }
 
-static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
-					      int i, struct cpumask *affinity)
+static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
+{
+	return irq_table->sf_ctrl_pool;
+}
+
+static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
 {
-	if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
-		return irq_table->sf_ctrl_pool;
 	return irq_table->sf_comp_pool;
 }
 
+static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+	struct mlx5_irq_pool *pool = NULL;
+
+	if (mlx5_core_is_sf(dev))
+		pool = sf_ctrl_irq_pool_get(irq_table);
+
+	/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
+	 * the PF IRQs pool in case the SF pool doesn't exist.
+	 */
+	return pool ? pool : irq_table->pf_pool;
+}
+
 /**
  * mlx5_irq_release - release an IRQ back to the system.
  * @irq: irq to be released.
@@ -424,6 +437,38 @@ void mlx5_irq_release(struct mlx5_irq *i
 }
 
 /**
+ * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
+ * @dev: mlx5 device that requesting the IRQ.
+ *
+ * This function returns a pointer to IRQ, or ERR_PTR in case of error.
+ */
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
+	cpumask_var_t req_mask;
+	struct mlx5_irq *irq;
+
+	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+	cpumask_copy(req_mask, cpu_online_mask);
+	if (!irq_pool_is_sf_pool(pool)) {
+		/* In case we are allocating a control IRQ for PF/VF */
+		if (!pool->xa_num_irqs.max) {
+			cpumask_clear(req_mask);
+			/* In case we only have a single IRQ for PF/VF */
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+		}
+		/* Allocate the IRQ in the last index of the pool */
+		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+	} else {
+		irq = irq_pool_request_affinity(pool, req_mask);
+	}
+
+	free_cpumask_var(req_mask);
+	return irq;
+}
+
+/**
  * mlx5_irq_request - request an IRQ for mlx5 device.
  * @dev: mlx5 device that requesting the IRQ.
  * @vecidx: vector index of the IRQ. This argument is ignore if affinity is
@@ -440,7 +485,7 @@ struct mlx5_irq *mlx5_irq_request(struct
 	struct mlx5_irq *irq;
 
 	if (mlx5_core_is_sf(dev)) {
-		pool = find_sf_irq_pool(irq_table, vecidx, affinity);
+		pool = sf_irq_pool_get(irq_table);
 		if (!pool)
 			/* we don't have IRQs for SFs, using the PF IRQs */
 			goto pf_irq;
@@ -453,7 +498,6 @@ struct mlx5_irq *mlx5_irq_request(struct
 	}
 pf_irq:
 	pool = irq_table->pf_pool;
-	vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
 	irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
 	if (IS_ERR(irq))