From: Moshe Tal <moshet@nvidia.com>
Date: Tue, 9 Feb 2021 14:47:05 -0800
Subject: net/mlx5e: HTB, move ids to selq_params struct
Patch-mainline: v6.0-rc1
Git-commit: 4f8d1d3adc8d126c600fff55ff81855d50d5df3e
References: jsc#PED-1549

The HTB id fields are needed for queue selection. Moving them to the
selq_params struct simplifies synchronization between the control flow
and mlx5e_select_queue() and keeps the IDs in the hot cacheline of
mlx5e_selq_params.

Replace mlx5e_selq_prepare() with separate functions that each change a
subset of the parameters while keeping the rest unchanged.

This will also be useful for hiding the mlx5e_htb structure from the
rest of the driver in a later patch in this series.
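
For reference, a minimal illustrative sketch (not part of the diff below) of
how the two sides interact after this change. The example_* helpers are
hypothetical; the mlx5e_selq_* calls and the htb_maj_id/htb_defcls fields are
the ones introduced by this patch:

  /* Control path: stage the new HTB ids under priv->state_lock, then
   * publish them by RCU-swapping the active selq params.
   */
  static void example_enable_htb(struct mlx5e_priv *priv, u16 maj_id, u16 defcls)
  {
  	mlx5e_selq_prepare_htb(&priv->selq, maj_id, defcls);
  	/* ... allocate QoS SQs, program the hardware ... */
  	mlx5e_selq_apply(&priv->selq);
  }

  /* Datapath: both ids are read from the same RCU-protected snapshot, so
   * the old smp_store_release/smp_load_acquire pairing is no longer needed.
   */
  static u16 example_htb_classid(struct mlx5e_selq_params *selq, u32 skb_prio)
  {
  	if ((TC_H_MAJ(skb_prio) >> 16) == selq->htb_maj_id)
  		return TC_H_MIN(skb_prio);
  	return selq->htb_defcls;
  }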

Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Moshe Tal <moshet@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h         |    2 
 drivers/net/ethernet/mellanox/mlx5/core/en/qos.c     |   23 ++-------
 drivers/net/ethernet/mellanox/mlx5/core/en/selq.c    |   48 ++++++++++++++++---
 drivers/net/ethernet/mellanox/mlx5/core/en/selq.h    |    4 +
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |    4 -
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c    |   15 +++--
 6 files changed, 62 insertions(+), 34 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -904,8 +904,6 @@ struct mlx5e_htb {
 	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
 	struct mlx5e_sq_stats **qos_sq_stats;
 	u16 max_qos_sqs;
-	u16 maj_id;
-	u16 defcls;
 };
 
 struct mlx5e_trap;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -144,9 +144,9 @@ static void mlx5e_sw_node_delete(struct
 static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
 {
 	/* These channel params are safe to access from the datapath, because:
-	 * 1. This function is called only after checking priv->htb.maj_id != 0,
+	 * 1. This function is called only after checking selq->htb_maj_id != 0,
 	 *    and the number of queues can't change while HTB offload is active.
-	 * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+	 * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
 	 *    mlx5e_select_queue to finish while holding priv->state_lock,
 	 *    preventing other code from changing the number of queues.
 	 */
@@ -417,7 +417,7 @@ int mlx5e_qos_open_queues(struct mlx5e_p
 	struct mlx5e_qos_node *node = NULL;
 	int bkt, err;
 
-	if (!priv->htb.maj_id)
+	if (!mlx5e_selq_is_htb_enabled(&priv->selq))
 		return 0;
 
 	err = mlx5e_qos_alloc_queues(priv, chs);
@@ -501,10 +501,10 @@ mlx5e_htb_root_add(struct mlx5e_priv *pr
 		return -EOPNOTSUPP;
 	}
 
+	mlx5e_selq_prepare_htb(&priv->selq, htb_maj_id, htb_defcls);
+
 	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 	if (opened) {
-		mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
-
 		err = mlx5e_qos_alloc_queues(priv, &priv->channels);
 		if (err)
 			goto err_cancel_selq;
@@ -522,14 +522,7 @@ mlx5e_htb_root_add(struct mlx5e_priv *pr
 		goto err_sw_node_delete;
 	}
 
-	WRITE_ONCE(priv->htb.defcls, htb_defcls);
-	/* Order maj_id after defcls - pairs with
-	 * mlx5e_select_queue/mlx5e_select_htb_queues.
-	 */
-	smp_store_release(&priv->htb.maj_id, htb_maj_id);
-
-	if (opened)
-		mlx5e_selq_apply(&priv->selq);
+	mlx5e_selq_apply(&priv->selq);
 
 	return 0;
 
@@ -556,11 +549,9 @@ static int mlx5e_htb_root_del(struct mlx
 	 */
 	synchronize_net();
 
-	mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
+	mlx5e_selq_prepare_htb(&priv->selq, 0, 0);
 	mlx5e_selq_apply(&priv->selq);
 
-	WRITE_ONCE(priv->htb.maj_id, 0);
-
 	root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
 	if (!root) {
 		qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -19,6 +19,8 @@ struct mlx5e_selq_params {
 			bool is_ptp : 1;
 		};
 	};
+	u16 htb_maj_id;
+	u16 htb_defcls;
 };
 
 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +46,8 @@ int mlx5e_selq_init(struct mlx5e_selq *s
 		.num_tcs = 1,
 		.is_htb = false,
 		.is_ptp = false,
+		.htb_maj_id = 0,
+		.htb_defcls = 0,
 	};
 	rcu_assign_pointer(selq->active, init_params);
 
@@ -64,21 +68,50 @@ void mlx5e_selq_cleanup(struct mlx5e_sel
 	selq->standby = NULL;
 }
 
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
 {
+	struct mlx5e_selq_params *selq_active;
+
 	lockdep_assert_held(selq->state_lock);
 	WARN_ON_ONCE(selq->is_prepared);
 
 	selq->is_prepared = true;
 
+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
 	selq->standby->num_channels = params->num_channels;
 	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
 	selq->standby->num_regular_queues =
 		selq->standby->num_channels * selq->standby->num_tcs;
-	selq->standby->is_htb = htb;
 	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
 }
 
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+	struct mlx5e_selq_params *selq_active =
+		rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+	return selq_active->htb_maj_id;
+}
+
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+	struct mlx5e_selq_params *selq_active;
+
+	lockdep_assert_held(selq->state_lock);
+	WARN_ON_ONCE(selq->is_prepared);
+
+	selq->is_prepared = true;
+
+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
+	selq->standby->is_htb = htb_maj_id;
+	selq->standby->htb_maj_id = htb_maj_id;
+	selq->standby->htb_defcls = htb_defcls;
+}
+
 void mlx5e_selq_apply(struct mlx5e_selq *selq)
 {
 	struct mlx5e_selq_params *old_params;
@@ -137,15 +170,16 @@ static u16 mlx5e_select_ptpsq(struct net
 	return selq->num_regular_queues + up;
 }
 
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+				  struct mlx5e_selq_params *selq)
 {
 	u16 classid;
 
 	/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
-	if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+	if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
 		classid = TC_H_MIN(skb->priority);
 	else
-		classid = READ_ONCE(priv->htb.defcls);
+		classid = selq->htb_defcls;
 
 	if (!classid)
 		return 0;
@@ -187,10 +221,10 @@ u16 mlx5e_select_queue(struct net_device
 			up * selq->num_channels;
 	}
 
-	if (unlikely(selq->is_htb)) {
+	if (unlikely(selq->htb_maj_id)) {
 		/* num_tcs == 1, shortcut for PTP */
 
-		txq_ix = mlx5e_select_htb_queue(priv, skb);
+		txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
 		if (txq_ix > 0)
 			return txq_ix;
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -21,7 +21,9 @@ struct sk_buff;
 
 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
 void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
 void mlx5e_selq_apply(struct mlx5e_selq *selq);
 void mlx5e_selq_cancel(struct mlx5e_selq *selq);
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -459,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct ml
 	 * because the numeration of the QoS SQs will change, while per-queue
 	 * qdiscs are attached.
 	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		err = -EINVAL;
 		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
 			   __func__);
@@ -2075,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct n
 	 * the numeration of the QoS SQs will change, while per-queue qdiscs are
 	 * attached.
 	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
 			   __func__);
 		return -EINVAL;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2839,7 +2839,7 @@ int mlx5e_safe_switch_params(struct mlx5
 
 	new_chs.params = *params;
 
-	mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+	mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
 
 	err = mlx5e_open_channels(priv, &new_chs);
 	if (err)
@@ -2895,7 +2895,7 @@ int mlx5e_open_locked(struct net_device
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
-	mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+	mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
 
 	set_bit(MLX5E_STATE_OPENED, &priv->state);
 
@@ -3406,7 +3406,7 @@ static int mlx5e_setup_tc_mqprio(struct
 	/* MQPRIO is another toplevel qdisc that can't be attached
 	 * simultaneously with the offloaded HTB.
 	 */
-	if (WARN_ON(priv->htb.maj_id))
+	if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
 		return -EINVAL;
 
 	switch (mqprio->mode) {
@@ -3672,6 +3672,7 @@ static int set_feature_cvlan_filter(stru
 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
 
 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
 	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
@@ -3681,12 +3682,14 @@ static int set_feature_hw_tc(struct net_
 	}
 #endif
 
-	if (!enable && priv->htb.maj_id) {
+	mutex_lock(&priv->state_lock);
+	if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
-		return -EINVAL;
+		err = -EINVAL;
 	}
+	mutex_unlock(&priv->state_lock);
 
-	return 0;
+	return err;
 }
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)