From: Eli Cohen <elic@nvidia.com>
Date: Mon, 23 Aug 2021 08:21:20 +0300
Subject: vdpa/mlx5: Decouple virtqueue callback from struct
 mlx5_vdpa_virtqueue
Patch-mainline: v5.15-rc1
Git-commit: db296d252dfb977885391e47d3fb6ac5a1f9601c
References: jsc#PED-1549

Instead, define an array of struct vdpa_callback on struct mlx5_vdpa_net
and use it to store the callback for any virtqueue provided. This is
required because callback configurations arrive before feature
negotiation. With control VQ and multiqueue introduced in the following
patches, we want to save this information until after feature
negotiation, when the CVQ index is known.
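
A minimal sketch of the resulting scheme, as stand-alone C with
stand-in types: MLX5_MAX_SUPPORTED_VQS is reduced, the structs carry
only the fields relevant here, and set_vq_cb()/fire_vq_cb() are
illustrative stand-ins for mlx5_vdpa_set_vq_cb() and
mlx5_vdpa_handle_completions() in the diff below.

  #define MLX5_MAX_SUPPORTED_VQS 16  /* reduced for the sketch */

  /* Simplified; see include/linux/vdpa.h for the real definition. */
  struct vdpa_callback {
          void (*callback)(void *private);
          void *private;
  };

  struct mlx5_vdpa_virtqueue {
          int index;                   /* position in ndev->vqs[] */
          struct mlx5_vdpa_net *ndev;  /* back-pointer to the net device */
  };

  struct mlx5_vdpa_net {
          struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
          /* One spare slot so the control VQ callback can be stored
           * before feature negotiation reveals the CVQ index.
           */
          struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
  };

  /* Callbacks are recorded per index on the net device... */
  static void set_vq_cb(struct mlx5_vdpa_net *ndev, unsigned int idx,
                        const struct vdpa_callback *cb)
  {
          ndev->event_cbs[idx] = *cb;
  }

  /* ...and resolved through the virtqueue's back-pointer at completion
   * time, instead of living on each struct mlx5_vdpa_virtqueue.
   */
  static void fire_vq_cb(const struct mlx5_vdpa_virtqueue *mvq)
  {
          const struct vdpa_callback *cb = &mvq->ndev->event_cbs[mvq->index];

          if (cb->callback)
                  cb->callback(cb->private);
  }

The spare array slot is what lets a callback for the not-yet-indexed
control VQ be accepted up front; the memset() added on device reset in
the diff clears all saved callbacks in one go.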

Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20210823052123.14909-4-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c |   17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -90,7 +90,6 @@ struct mlx5_vq_restore_info {
 	u16 avail_index;
 	u16 used_index;
 	bool ready;
-	struct vdpa_callback cb;
 	bool restore;
 };
 
@@ -100,7 +99,6 @@ struct mlx5_vdpa_virtqueue {
 	u64 device_addr;
 	u64 driver_addr;
 	u32 num_ent;
-	struct vdpa_callback event_cb;
 
 	/* Resources for implementing the notification channel from the device
 	 * to the driver. fwqp is the firmware end of an RC connection; the
@@ -140,6 +138,7 @@ struct mlx5_vdpa_net {
 	struct mlx5_vdpa_net_resources res;
 	struct virtio_net_config config;
 	struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+	struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
 
 	/* Serialize vq resources creation and destruction. This is required
 	 * since memory map might change and we need to destroy and create
@@ -481,6 +480,10 @@ static int mlx5_vdpa_poll_one(struct mlx
 
 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
 {
+	struct mlx5_vdpa_net *ndev = mvq->ndev;
+	struct vdpa_callback *event_cb;
+
+	event_cb = &ndev->event_cbs[mvq->index];
 	mlx5_cq_set_ci(&mvq->cq.mcq);
 
 	/* make sure CQ cosumer update is visible to the hardware before updating
@@ -488,8 +491,8 @@ static void mlx5_vdpa_handle_completions
 	 */
 	dma_wmb();
 	rx_post(&mvq->vqqp, num);
-	if (mvq->event_cb.callback)
-		mvq->event_cb.callback(mvq->event_cb.private);
+	if (event_cb->callback)
+		event_cb->callback(event_cb->private);
 }
 
 static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
@@ -1384,9 +1387,8 @@ static void mlx5_vdpa_set_vq_cb(struct v
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-	struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
 
-	vq->event_cb = *cb;
+	ndev->event_cbs[idx] = *cb;
 }
 
 static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
@@ -1623,7 +1625,6 @@ static int save_channel_info(struct mlx5
 	ri->desc_addr = mvq->desc_addr;
 	ri->device_addr = mvq->device_addr;
 	ri->driver_addr = mvq->driver_addr;
-	ri->cb = mvq->event_cb;
 	ri->restore = true;
 	return 0;
 }
@@ -1668,7 +1669,6 @@ static void restore_channels_info(struct
 		mvq->desc_addr = ri->desc_addr;
 		mvq->device_addr = ri->device_addr;
 		mvq->driver_addr = ri->driver_addr;
-		mvq->event_cb = ri->cb;
 	}
 }
 
@@ -1791,6 +1791,7 @@ static void mlx5_vdpa_set_status(struct
 		mlx5_vdpa_destroy_mr(&ndev->mvdev);
 		ndev->mvdev.status = 0;
 		ndev->mvdev.mlx_features = 0;
+		memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
 		++mvdev->generation;
 		if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 			if (mlx5_vdpa_create_mr(mvdev, NULL))