Blob Blame History Raw
From: Eli Cohen <elic@nvidia.com>
Date: Mon, 23 Aug 2021 08:21:21 +0300
Subject: vdpa/mlx5: Ensure valid indices are provided
Patch-mainline: v5.15-rc1
Git-commit: e4fc66508c884b87422a98259cdfe135edae130f
References: jsc#PED-1549

Following patches add control virtqueue and multiqueue support. We want
to verify that the index value to callbacks referencing a virtqueue is
valid.

The logic defining valid indices is as follows:
CVQ clear: 0 and 1.
CVQ set, MQ clear: 0, 1 and 2
CVQ set, MQ set: 0..nvq where nvq is whatever is provided to
_vdpa_register_device()

Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20210823052123.14909-5-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/vdpa/mlx5/core/mlx5_vdpa.h |    1 
 drivers/vdpa/mlx5/net/mlx5_vnet.c  |   54 +++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -56,6 +56,7 @@ struct mlx5_vdpa_dev {
 	u64 actual_features;
 	u8 status;
 	u32 max_vqs;
+	u16 max_idx;
 	u32 generation;
 
 	struct mlx5_vdpa_mr mr;
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 	(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK |        \
 	 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
 
+#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+
 struct mlx5_vdpa_net_resources {
 	u32 tisn;
 	u32 tdn;
@@ -133,6 +135,14 @@ struct mlx5_vdpa_virtqueue {
  */
 #define MLX5_MAX_SUPPORTED_VQS 16
 
+static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+	if (unlikely(idx > mvdev->max_idx))
+		return false;
+
+	return true;
+}
+
 struct mlx5_vdpa_net {
 	struct mlx5_vdpa_dev mvdev;
 	struct mlx5_vdpa_net_resources res;
@@ -1354,6 +1364,9 @@ static void mlx5_vdpa_kick_vq(struct vdp
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
 
+	if (!is_index_valid(mvdev, idx))
+		return;
+
 	if (unlikely(!mvq->ready))
 		return;
 
@@ -1367,6 +1380,9 @@ static int mlx5_vdpa_set_vq_address(stru
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
 
+	if (!is_index_valid(mvdev, idx))
+		return -EINVAL;
+
 	mvq->desc_addr = desc_area;
 	mvq->device_addr = device_area;
 	mvq->driver_addr = driver_area;
@@ -1379,6 +1395,9 @@ static void mlx5_vdpa_set_vq_num(struct
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq;
 
+	if (!is_index_valid(mvdev, idx))
+		return;
+
 	mvq = &ndev->vqs[idx];
 	mvq->num_ent = num;
 }
@@ -1397,6 +1416,9 @@ static void mlx5_vdpa_set_vq_ready(struc
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
 
+	if (!is_index_valid(mvdev, idx))
+		return;
+
 	if (!ready)
 		suspend_vq(ndev, mvq);
 
@@ -1409,6 +1431,9 @@ static bool mlx5_vdpa_get_vq_ready(struc
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
 
+	if (!is_index_valid(mvdev, idx))
+		return false;
+
 	return mvq->ready;
 }
 
@@ -1419,6 +1444,9 @@ static int mlx5_vdpa_set_vq_state(struct
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
 
+	if (!is_index_valid(mvdev, idx))
+		return -EINVAL;
+
 	if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
 		mlx5_vdpa_warn(mvdev, "can't modify available index\n");
 		return -EINVAL;
@@ -1437,6 +1465,9 @@ static int mlx5_vdpa_get_vq_state(struct
 	struct mlx5_virtq_attr attr;
 	int err;
 
+	if (!is_index_valid(mvdev, idx))
+		return -EINVAL;
+
 	/* If the virtq object was destroyed, use the value saved at
 	 * the last minute of suspend_vq. This caters for userspace
 	 * that cares about emulating the index after vq is stopped.
@@ -1556,6 +1587,24 @@ static __virtio16 cpu_to_mlx5vdpa16(stru
 	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
 }
 
+static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
+{
+	if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
+		if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
+			/* MQ supported. CVQ index is right above the last data virtqueue's */
+			mvdev->max_idx = mvdev->max_vqs;
+		} else {
+	/* Only CVQ supported. Data virtqueues occupy indices 0 and 1.
+			 * CVQ gets index 2
+			 */
+			mvdev->max_idx = 2;
+		}
+	} else {
+		/* Two data virtqueues only: one for rx and one for tx */
+		mvdev->max_idx = 1;
+	}
+}
+
 static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1571,6 +1620,7 @@ static int mlx5_vdpa_set_features(struct
 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
 	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
 	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+	update_cvq_info(mvdev);
 	return err;
 }
 
@@ -1792,6 +1842,7 @@ static void mlx5_vdpa_set_status(struct
 		ndev->mvdev.status = 0;
 		ndev->mvdev.mlx_features = 0;
 		memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+		ndev->mvdev.actual_features = 0;
 		++mvdev->generation;
 		if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 			if (mlx5_vdpa_create_mr(mvdev, NULL))
@@ -1892,6 +1943,9 @@ static struct vdpa_notification_area mlx
 	struct mlx5_vdpa_net *ndev;
 	phys_addr_t addr;
 
+	if (!is_index_valid(mvdev, idx))
+		return ret;
+
 	/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
 	 * notification to avoid the risk of mapping pages that contain BAR of more
 	 * than one SF