From: Mark Bloch <mbloch@nvidia.com>
Date: Tue, 1 Mar 2022 17:34:31 +0000
Subject: net/mlx5: Lag, use hash when in roce lag on 4 ports
Patch-mainline: v5.19-rc1
Git-commit: cdf611d17094aea113d7acc32040a1b362dfe2c4
References: jsc#PED-1549

Downstream patches will add support for lag over 4 ports.
In that mode we will only use hash as the uplink selection method.
Using hash instead of queue affinity (the method used before this
patch) offers key advantages, contrasted in the sketch below:

- Aligns the port selection method with the one used by the bond device

- Better packet distribution, as a single queue can transmit from
  multiple ports (with queue affinity a queue is bound to a single port
  regardless of the packet being sent).

- In case of failover, traffic is split between multiple ports instead
  of being moved to a single one as with queue affinity.

Going forward, it was decided that queue affinity will be deprecated,
as hash provides a better user experience. This means that on 4-port
HCAs hash will always be used.

Future work will add hash support for 2-port HCAs as well.
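
To make the contrast concrete, the two selection methods can be
sketched roughly as below (a standalone C illustration with
hypothetical helper names; in the driver the hash-based selection is
implemented in hardware via the port selection flow table, not in
software like this):

  #include <stdint.h>

  #define NUM_PORTS 4

  /* Queue affinity: a send queue is pinned to one uplink port, so
   * every packet sent from that queue egresses the same port.
   */
  static uint8_t port_by_queue_affinity(uint32_t queue_index)
  {
  	return queue_index % NUM_PORTS;
  }

  /* Hash based: the egress port is derived from a hash over the
   * packet headers, so a single queue can spread its flows across
   * all active ports, and on failover the flows redistribute over
   * the ports that remain up.
   */
  static uint8_t port_by_hash(uint32_t flow_hash, uint8_t num_active_ports)
  {
  	return flow_hash % num_active_ports;
  }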

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c |   45 +++++++++++++++++-----
 1 file changed, 36 insertions(+), 9 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -310,17 +310,41 @@ void mlx5_modify_lag(struct mlx5_lag *ld
 		mlx5_lag_drop_rule_setup(ldev, tracker);
 }
 
-static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
-				       struct lag_tracker *tracker, u8 *flags)
+#define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4
+static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
+					   struct lag_tracker *tracker, u8 *flags)
+{
+	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+
+	if (ldev->ports == MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED) {
+		/* Four ports are supported only in hash mode */
+		if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table))
+			return -EINVAL;
+		*flags |= MLX5_LAG_FLAG_HASH_BASED;
+	}
+
+	return 0;
+}
+
+static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
+					       struct lag_tracker *tracker, u8 *flags)
 {
-	bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
 	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
 
-	if (roce_lag ||
-	    !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
-	    tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
-		return;
-	*flags |= MLX5_LAG_FLAG_HASH_BASED;
+	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
+	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+		*flags |= MLX5_LAG_FLAG_HASH_BASED;
+	return 0;
+}
+
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+				      struct lag_tracker *tracker, u8 *flags)
+{
+	bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+
+	if (roce_lag)
+		return mlx5_lag_set_port_sel_mode_roce(ldev, tracker, flags);
+	return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags);
 }
 
 static char *get_str_port_sel_mode(u8 flags)
@@ -382,7 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *l
 
 	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
 				       &ldev->v2p_map[MLX5_LAG_P2]);
-	mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+	err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+	if (err)
+		return err;
+
 	if (flags & MLX5_LAG_FLAG_HASH_BASED) {
 		err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
 					       ldev->v2p_map[MLX5_LAG_P1],