From: Lama Kayal <lkayal@nvidia.com>
Date: Sun, 9 Jan 2022 11:23:05 +0200
Subject: net/mlx5e: Convert mlx5e_flow_steering member of mlx5e_priv to
 pointer
Patch-mainline: v6.0-rc1
Git-commit: af8bbf7300686961f74e72e2dc10a76672603cb3
References: jsc#PED-1549

Make the mlx5e_flow_steering member of mlx5e_priv a pointer and
allocate it dynamically.

Allocate fs for all profiles when initializing the profile, and
symmetrically deallocate it at profile cleanup.
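
A minimal sketch of the resulting init/cleanup flow, condensed from the
en_main.c hunk below (error logging and unrelated steps trimmed):

	static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
				  struct net_device *netdev)
	{
		struct mlx5e_priv *priv = netdev_priv(netdev);

		/* fs is now allocated dynamically, per profile */
		priv->fs = mlx5e_fs_init(priv->profile);
		if (!priv->fs)
			return -ENOMEM;
		...
		return 0;
	}

	static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
	{
		...
		/* symmetric teardown frees the fs allocated at init */
		mlx5e_fs_cleanup(priv->fs);
	}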

Signed-off-by: Lama Kayal <lkayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h                |    2 
 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h             |    5 
 drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c |   72 +--
 drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c            |    8 
 drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c    |    2 
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c   |   32 -
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c |   10 
 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c           |   58 +--
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c             |  226 ++++++------
 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c     |   24 -
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c           |   13 
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c            |   31 +
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             |   60 +--
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c       |   19 -
 14 files changed, 298 insertions(+), 264 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -921,7 +921,7 @@ struct mlx5e_priv {
 	struct mlx5e_rx_res       *rx_res;
 	u32                       *tx_rates;
 
-	struct mlx5e_flow_steering fs;
+	struct mlx5e_flow_steering *fs;
 
 	struct workqueue_struct    *wq;
 	struct work_struct         update_carrier_work;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -137,6 +137,7 @@ static inline int mlx5e_arfs_disable(str
 struct mlx5e_accel_fs_tcp;
 #endif
 
+struct mlx5e_profile;
 struct mlx5e_fs_udp;
 struct mlx5e_fs_any;
 struct mlx5e_ptp_fs;
@@ -177,8 +178,8 @@ void mlx5e_disable_cvlan_filter(struct m
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
 
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int  trap_id, int tir_num);
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_udp = priv->fs.udp;
+	fs_udp = priv->fs->udp;
 	ft = fs_udp->tables[type].t;
 
 	fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struc
 	struct mlx5e_fs_udp *fs_udp;
 	int err;
 
-	fs_udp = priv->fs.udp;
+	fs_udp = priv->fs->udp;
 	fs_udp_t = &fs_udp->tables[type];
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
 	rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
 
 static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+	struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct ml
 	ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_p
 
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
 		/* Modify ttc rules destination to point back to the indir TIRs */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_pr
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
-		dest.ft = priv->fs.udp->tables[i].t;
+		dest.ft = priv->fs->udp->tables[i].t;
 
 		/* Modify ttc rules destination to point on the accel_fs FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_pr
 
 void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
 {
-	struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+	struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
 	int i;
 
 	if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(st
 		fs_udp_destroy_table(fs_udp, i);
 
 	kfree(fs_udp);
-	priv->fs.udp = NULL;
+	priv->fs->udp = NULL;
 }
 
 int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
 {
 	int i, err;
 
-	if (priv->fs.udp) {
-		priv->fs.udp->ref_cnt++;
+	if (priv->fs->udp) {
+		priv->fs->udp->ref_cnt++;
 		return 0;
 	}
 
-	priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
-	if (!priv->fs.udp)
+	priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+	if (!priv->fs->udp)
 		return -ENOMEM;
 
 	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(stru
 	if (err)
 		goto err_destroy_tables;
 
-	priv->fs.udp->ref_cnt = 1;
+	priv->fs->udp->ref_cnt = 1;
 
 	return 0;
 
 err_destroy_tables:
 	while (--i >= 0)
-		fs_udp_destroy_table(priv->fs.udp, i);
+		fs_udp_destroy_table(priv->fs->udp, i);
 
-	kfree(priv->fs.udp);
-	priv->fs.udp = NULL;
+	kfree(priv->fs->udp);
+	priv->fs->udp = NULL;
 	return err;
 }
 
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_any = priv->fs.any;
+	fs_any = priv->fs->any;
 	ft = fs_any->table.t;
 
 	fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struc
 	struct mlx5e_fs_any *fs_any;
 	int err;
 
-	fs_any = priv->fs.any;
+	fs_any = priv->fs->any;
 	fs_any_t = &fs_any->table;
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
 	rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
 
 static int fs_any_create_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.any->table;
+	struct mlx5e_flow_table *ft = &priv->fs->any->table;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct ml
 	ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_p
 	int err;
 
 	/* Modify ttc rules destination to point back to the indir TIRs */
-	err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+	err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
 	if (err) {
 		netdev_err(priv->netdev,
 			   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_pr
 	int err;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.any->table.t;
+	dest.ft = priv->fs->any->table.t;
 
 	/* Modify ttc rules destination to point on the accel_fs FTs */
-	err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+	err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
 	if (err) {
 		netdev_err(priv->netdev,
 			   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct
 
 void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
 {
-	struct mlx5e_fs_any *fs_any = priv->fs.any;
+	struct mlx5e_fs_any *fs_any = priv->fs->any;
 
 	if (!fs_any)
 		return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(st
 	fs_any_destroy_table(fs_any);
 
 	kfree(fs_any);
-	priv->fs.any = NULL;
+	priv->fs->any = NULL;
 }
 
 int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
 {
 	int err;
 
-	if (priv->fs.any) {
-		priv->fs.any->ref_cnt++;
+	if (priv->fs->any) {
+		priv->fs->any->ref_cnt++;
 		return 0;
 	}
 
-	priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
-	if (!priv->fs.any)
+	priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+	if (!priv->fs->any)
 		return -ENOMEM;
 
 	err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(stru
 	if (err)
 		goto err_destroy_table;
 
-	priv->fs.any->ref_cnt = 1;
+	priv->fs->any->ref_cnt = 1;
 
 	return 0;
 
 err_destroy_table:
-	fs_any_destroy_table(priv->fs.any);
+	fs_any_destroy_table(priv->fs->any);
 
-	kfree(priv->fs.any);
-	priv->fs.any = NULL;
+	kfree(priv->fs->any);
+	priv->fs->any = NULL;
 	return err;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -624,7 +624,7 @@ static int mlx5e_ptp_set_state(struct ml
 
 static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
 {
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 
 	if (!ptp_fs->valid)
 		return;
@@ -641,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct
 static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
 {
 	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 	struct mlx5_flow_handle *rule;
 	int err;
 
@@ -808,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_p
 	if (!ptp_fs)
 		return -ENOMEM;
 
-	priv->fs.ptp_fs = ptp_fs;
+	priv->fs->ptp_fs = ptp_fs;
 	return 0;
 }
 
 void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
 {
-	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+	struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
 
 	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
 		return;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *p
 	u32 max_chain;
 
 	esw = priv->mdev->priv.eswitch;
-	chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs.tc);
+	chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
 	max_chain = mlx5_chains_get_chain_range(chains);
 	reformat_and_fwd = is_esw ?
 			   MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule
 	struct mlx5_flow_handle *rule;
 	int err = 0;
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 	accel_fs_t = &fs_tcp->tables[type];
 
-	dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+	dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
 	rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
 
 static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+	struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(str
 	ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct m
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
 		/* Modify ttc rules destination to point back to the indir TIRs */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct ml
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
-		dest.ft = priv->fs.accel_tcp->tables[i].t;
+		dest.ft = priv->fs->accel_tcp->tables[i].t;
 
 		/* Modify ttc rules destination to point on the accel_fs FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(s
 {
 	struct mlx5e_accel_fs_tcp *fs_tcp;
 
-	fs_tcp = priv->fs.accel_tcp;
+	fs_tcp = priv->fs->accel_tcp;
 	if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
 		return;
 
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct m
 {
 	int i;
 
-	if (!priv->fs.accel_tcp)
+	if (!priv->fs->accel_tcp)
 		return;
 
 	accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct m
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
 		accel_fs_tcp_destroy_table(priv, i);
 
-	kfree(priv->fs.accel_tcp);
-	priv->fs.accel_tcp = NULL;
+	kfree(priv->fs->accel_tcp);
+	priv->fs->accel_tcp = NULL;
 }
 
 int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx
 	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
 		return -EOPNOTSUPP;
 
-	priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
-	if (!priv->fs.accel_tcp)
+	priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
+	if (!priv->fs->accel_tcp)
 		return -ENOMEM;
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
@@ -396,7 +396,7 @@ err_destroy_tables:
 	while (--i >= 0)
 		accel_fs_tcp_destroy_table(priv, i);
 
-	kfree(priv->fs.accel_tcp);
-	priv->fs.accel_tcp = NULL;
+	kfree(priv->fs->accel_tcp);
+	priv->fs->accel_tcp = NULL;
 	return err;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *
 	fs_prot = &accel_esp->fs_prot[type];
 
 	fs_prot->default_dest =
-		mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+		mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
 
 	ft_attr.max_fte = 1;
 	ft_attr.autogroup.max_num_groups = 1;
 	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
-	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *
 	ft_attr.prio = MLX5E_NIC_PRIO;
 	ft_attr.autogroup.num_reserved_entries = 1;
 	ft_attr.autogroup.max_num_groups = 1;
-	ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+	ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *
 	/* connect */
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest.ft = fs_prot->ft;
-	mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+	mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
 
 skip:
 	fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv
 		goto out;
 
 	/* disconnect */
-	mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+	mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
 
 	/* remove FT */
 	rx_destroy(priv, type);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_pri
 
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
 		/* Modify ttc rules destination back to their default */
-		err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+		err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+		dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
 		/* Modify ttc rules destination to point on the aRFS FTs */
-		err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+		err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
 		if (err) {
 			netdev_err(priv->netdev,
 				   "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct
 	int i;
 
 	arfs_del_rules(priv);
-	destroy_workqueue(priv->fs.arfs->wq);
+	destroy_workqueue(priv->fs->arfs->wq);
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
-			arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+		if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+			arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
 	}
 }
 
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct ml
 		return;
 
 	_mlx5e_cleanup_tables(priv);
-	kvfree(priv->fs.arfs);
+	kvfree(priv->fs->arfs);
 }
 
 static int arfs_add_default_rule(struct mlx5e_priv *priv,
 				 enum arfs_type type)
 {
-	struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+	struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
 	struct mlx5_flow_destination dest = {};
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
 static int arfs_create_table(struct mlx5e_priv *priv,
 			     enum arfs_type type)
 {
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5
 	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5
 	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
 		return 0;
 
-	priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
-	if (!priv->fs.arfs)
+	priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+	if (!priv->fs->arfs)
 		return -ENOMEM;
 
-	spin_lock_init(&priv->fs.arfs->arfs_lock);
-	INIT_LIST_HEAD(&priv->fs.arfs->rules);
-	priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
-	if (!priv->fs.arfs->wq)
+	spin_lock_init(&priv->fs->arfs->arfs_lock);
+	INIT_LIST_HEAD(&priv->fs->arfs->rules);
+	priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+	if (!priv->fs->arfs->wq)
 		goto err;
 
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5
 err_des:
 	_mlx5e_cleanup_tables(priv);
 err:
-	kvfree(priv->fs.arfs);
+	kvfree(priv->fs->arfs);
 	return err;
 }
 
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct
 	int i;
 	int j;
 
-	spin_lock_bh(&priv->fs.arfs->arfs_lock);
-	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs->arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct
 				break;
 		}
 	}
-	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+	spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
 		if (arfs_rule->rule)
 			mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_
 	int i;
 	int j;
 
-	spin_lock_bh(&priv->fs.arfs->arfs_lock);
-	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs->arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
 		hlist_del_init(&rule->hlist);
 		hlist_add_head(&rule->hlist, &del_list);
 	}
-	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+	spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 
 	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
 		cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 					      struct arfs_rule *arfs_rule)
 {
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct arfs_tuple *tuple = &arfs_rule->tuple;
 	struct mlx5_flow_handle *rule = NULL;
 	struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work
 
 	mutex_lock(&priv->state_lock);
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		spin_lock_bh(&priv->fs.arfs->arfs_lock);
+		spin_lock_bh(&priv->fs->arfs->arfs_lock);
 		hlist_del(&arfs_rule->hlist);
-		spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+		spin_unlock_bh(&priv->fs->arfs->arfs_lock);
 
 		mutex_unlock(&priv->state_lock);
 		kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule
 	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
-	rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+	rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
 
 	hlist_add_head(&rule->hlist,
 		       arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_devic
 			u16 rxq_index, u32 flow_id)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
 	struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_devic
 			return -ENOMEM;
 		}
 	}
-	queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+	queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
 	spin_unlock_bh(&arfs->arfs_lock);
 	return arfs_rule->filter_id;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -144,7 +144,7 @@ static int mlx5e_vport_context_update_vl
 	int i;
 
 	list_size = 0;
-	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+	for_each_set_bit(vlan, priv->fs->vlan->active_cvlans, VLAN_N_VID)
 		list_size++;
 
 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -161,7 +161,7 @@ static int mlx5e_vport_context_update_vl
 		return -ENOMEM;
 
 	i = 0;
-	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(vlan, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		if (i >= list_size)
 			break;
 		vlans[i++] = vlan;
@@ -188,14 +188,14 @@ static int __mlx5e_add_vlan_rule(struct
 				 enum mlx5e_vlan_rule_type rule_type,
 				 u16 vid, struct mlx5_flow_spec *spec)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+	struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_handle **rule_p;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	int err = 0;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.l2.ft.t;
+	dest.ft = priv->fs->l2.ft.t;
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
@@ -205,24 +205,24 @@ static int __mlx5e_add_vlan_rule(struct
 		 * disabled in match value means both S & C tags
 		 * don't exist (untagged of both)
 		 */
-		rule_p = &priv->fs.vlan->untagged_rule;
+		rule_p = &priv->fs->vlan->untagged_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		rule_p = &priv->fs.vlan->any_cvlan_rule;
+		rule_p = &priv->fs->vlan->any_cvlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		rule_p = &priv->fs.vlan->any_svlan_rule;
+		rule_p = &priv->fs->vlan->any_svlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+		rule_p = &priv->fs->vlan->active_svlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -232,7 +232,7 @@ static int __mlx5e_add_vlan_rule(struct
 			 vid);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
-		rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+		rule_p = &priv->fs->vlan->active_cvlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -282,33 +282,33 @@ static void mlx5e_del_vlan_rule(struct m
 {
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		if (priv->fs.vlan->untagged_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
-			priv->fs.vlan->untagged_rule = NULL;
+		if (priv->fs->vlan->untagged_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->untagged_rule);
+			priv->fs->vlan->untagged_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		if (priv->fs.vlan->any_cvlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
-			priv->fs.vlan->any_cvlan_rule = NULL;
+		if (priv->fs->vlan->any_cvlan_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->any_cvlan_rule);
+			priv->fs->vlan->any_cvlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		if (priv->fs.vlan->any_svlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
-			priv->fs.vlan->any_svlan_rule = NULL;
+		if (priv->fs->vlan->any_svlan_rule) {
+			mlx5_del_flow_rules(priv->fs->vlan->any_svlan_rule);
+			priv->fs->vlan->any_svlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		if (priv->fs.vlan->active_svlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
-			priv->fs.vlan->active_svlans_rule[vid] = NULL;
+		if (priv->fs->vlan->active_svlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs->vlan->active_svlans_rule[vid]);
+			priv->fs->vlan->active_svlans_rule[vid] = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
-		if (priv->fs.vlan->active_cvlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
-			priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+		if (priv->fs->vlan->active_cvlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs->vlan->active_cvlans_rule[vid]);
+			priv->fs->vlan->active_cvlans_rule[vid] = NULL;
 		}
 		mlx5e_vport_context_update_vlans(priv);
 		break;
@@ -355,62 +355,62 @@ mlx5e_add_trap_rule(struct mlx5_flow_tab
 
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+	struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
 	struct mlx5_flow_handle *rule;
 	int err;
 
 	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		priv->fs.vlan->trap_rule = NULL;
+		priv->fs->vlan->trap_rule = NULL;
 		netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
 			   __func__, err);
 		return err;
 	}
-	priv->fs.vlan->trap_rule = rule;
+	priv->fs->vlan->trap_rule = rule;
 	return 0;
 }
 
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan->trap_rule) {
-		mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
-		priv->fs.vlan->trap_rule = NULL;
+	if (priv->fs->vlan->trap_rule) {
+		mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+		priv->fs->vlan->trap_rule = NULL;
 	}
 }
 
 int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 {
-	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+	struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
 	struct mlx5_flow_handle *rule;
 	int err;
 
 	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		priv->fs.l2.trap_rule = NULL;
+		priv->fs->l2.trap_rule = NULL;
 		netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
 			   __func__, err);
 		return err;
 	}
-	priv->fs.l2.trap_rule = rule;
+	priv->fs->l2.trap_rule = rule;
 	return 0;
 }
 
 void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
 {
-	if (priv->fs.l2.trap_rule) {
-		mlx5_del_flow_rules(priv->fs.l2.trap_rule);
-		priv->fs.l2.trap_rule = NULL;
+	if (priv->fs->l2.trap_rule) {
+		mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+		priv->fs->l2.trap_rule = NULL;
 	}
 }
 
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (!priv->fs.vlan->cvlan_filter_disabled)
+	if (!priv->fs->vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan->cvlan_filter_disabled = false;
+	priv->fs->vlan->cvlan_filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -418,10 +418,10 @@ void mlx5e_enable_cvlan_filter(struct ml
 
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan->cvlan_filter_disabled = true;
+	priv->fs->vlan->cvlan_filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -431,11 +431,11 @@ static int mlx5e_vlan_rx_add_cvid(struct
 {
 	int err;
 
-	set_bit(vid, priv->fs.vlan->active_cvlans);
+	set_bit(vid, priv->fs->vlan->active_cvlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	if (err)
-		clear_bit(vid, priv->fs.vlan->active_cvlans);
+		clear_bit(vid, priv->fs->vlan->active_cvlans);
 
 	return err;
 }
@@ -445,11 +445,11 @@ static int mlx5e_vlan_rx_add_svid(struct
 	struct net_device *netdev = priv->netdev;
 	int err;
 
-	set_bit(vid, priv->fs.vlan->active_svlans);
+	set_bit(vid, priv->fs->vlan->active_svlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 	if (err) {
-		clear_bit(vid, priv->fs.vlan->active_svlans);
+		clear_bit(vid, priv->fs->vlan->active_svlans);
 		return err;
 	}
 
@@ -481,10 +481,10 @@ int mlx5e_vlan_rx_kill_vid(struct net_de
 		return 0; /* no vlan table for uplink rep */
 
 	if (be16_to_cpu(proto) == ETH_P_8021Q) {
-		clear_bit(vid, priv->fs.vlan->active_cvlans);
+		clear_bit(vid, priv->fs->vlan->active_cvlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
-		clear_bit(vid, priv->fs.vlan->active_svlans);
+		clear_bit(vid, priv->fs->vlan->active_svlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 		netdev_update_features(dev);
 	}
@@ -498,14 +498,14 @@ static void mlx5e_add_vlan_rules(struct
 
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		mlx5e_add_any_vid_rules(priv);
 }
 
@@ -515,11 +515,11 @@ static void mlx5e_del_vlan_rules(struct
 
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
 	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
@@ -529,7 +529,7 @@ static void mlx5e_del_vlan_rules(struct
 	/* must be called after DESTROY bit is set and
 	 * set_rx_mode is called and flushed
 	 */
-	if (priv->fs.vlan->cvlan_filter_disabled)
+	if (priv->fs->vlan->cvlan_filter_disabled)
 		mlx5e_del_any_vid_rules(priv);
 }
 
@@ -576,14 +576,14 @@ static void mlx5e_sync_netdev_addr(struc
 
 	netif_addr_lock_bh(netdev);
 
-	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+	mlx5e_add_l2_to_hash(priv->fs->l2.netdev_uc,
 			     priv->netdev->dev_addr);
 
 	netdev_for_each_uc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs->l2.netdev_uc, ha->addr);
 
 	netdev_for_each_mc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs->l2.netdev_mc, ha->addr);
 
 	netif_addr_unlock_bh(netdev);
 }
@@ -599,11 +599,11 @@ static void mlx5e_fill_addr_array(struct
 	int i = 0;
 	int hi;
 
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+	addr_list = is_uc ? priv->fs->l2.netdev_uc : priv->fs->l2.netdev_mc;
 
 	if (is_uc) /* Make sure our own address is pushed first */
 		ether_addr_copy(addr_array[i++], ndev->dev_addr);
-	else if (priv->fs.l2.broadcast_enabled)
+	else if (priv->fs->l2.broadcast_enabled)
 		ether_addr_copy(addr_array[i++], ndev->broadcast);
 
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -628,12 +628,12 @@ static void mlx5e_vport_context_update_a
 	int err;
 	int hi;
 
-	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+	size = is_uc ? 0 : (priv->fs->l2.broadcast_enabled ? 1 : 0);
 	max_size = is_uc ?
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
 
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+	addr_list = is_uc ? priv->fs->l2.netdev_uc : priv->fs->l2.netdev_mc;
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
 		size++;
 
@@ -664,7 +664,7 @@ out:
 
 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 {
-	struct mlx5e_l2_table *ea = &priv->fs.l2;
+	struct mlx5e_l2_table *ea = &priv->fs->l2;
 
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -679,10 +679,10 @@ static void mlx5e_apply_netdev_addr(stru
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_uc, i)
 		mlx5e_execute_l2_action(priv, hn);
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_mc, i)
 		mlx5e_execute_l2_action(priv, hn);
 }
 
@@ -692,9 +692,9 @@ static void mlx5e_handle_netdev_addr(str
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_uc, i)
 		hn->action = MLX5E_ACTION_DEL;
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs->l2.netdev_mc, i)
 		hn->action = MLX5E_ACTION_DEL;
 
 	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -708,7 +708,7 @@ static void mlx5e_handle_netdev_addr(str
 
 static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
 {
-	struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+	struct mlx5_flow_table *ft = priv->fs->promisc.ft.t;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_handle **rule_p;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -719,9 +719,9 @@ static int mlx5e_add_promisc_rule(struct
 	if (!spec)
 		return -ENOMEM;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+	dest.ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
 
-	rule_p = &priv->fs.promisc.rule;
+	rule_p = &priv->fs->promisc.rule;
 	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 	if (IS_ERR(*rule_p)) {
 		err = PTR_ERR(*rule_p);
@@ -734,7 +734,7 @@ static int mlx5e_add_promisc_rule(struct
 
 static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+	struct mlx5e_flow_table *ft = &priv->fs->promisc.ft;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
 
@@ -743,7 +743,7 @@ static int mlx5e_create_promisc_table(st
 	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
@@ -765,19 +765,19 @@ err_destroy_promisc_table:
 
 static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
 {
-	if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+	if (WARN(!priv->fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
 		return;
-	mlx5_del_flow_rules(priv->fs.promisc.rule);
-	priv->fs.promisc.rule = NULL;
+	mlx5_del_flow_rules(priv->fs->promisc.rule);
+	priv->fs->promisc.rule = NULL;
 }
 
 static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
 {
-	if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+	if (WARN(!priv->fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
 		return;
 	mlx5e_del_promisc_rule(priv);
-	mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
-	priv->fs.promisc.ft.t = NULL;
+	mlx5_destroy_flow_table(priv->fs->promisc.ft.t);
+	priv->fs->promisc.ft.t = NULL;
 }
 
 void mlx5e_set_rx_mode_work(struct work_struct *work)
@@ -785,7 +785,7 @@ void mlx5e_set_rx_mode_work(struct work_
 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
 					       set_rx_mode_work);
 
-	struct mlx5e_l2_table *ea = &priv->fs.l2;
+	struct mlx5e_l2_table *ea = &priv->fs->l2;
 	struct net_device *ndev = priv->netdev;
 
 	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -844,7 +844,7 @@ static void mlx5e_destroy_groups(struct
 
 void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
 {
-	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+	ether_addr_copy(priv->fs->l2.broadcast.addr, priv->netdev->broadcast);
 }
 
 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -906,7 +906,7 @@ void mlx5e_set_ttc_params(struct mlx5e_p
 		ttc_params->tunnel_dests[tt].type =
 			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 		ttc_params->tunnel_dests[tt].ft =
-			mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+			mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
 	}
 }
 
@@ -922,7 +922,7 @@ static void mlx5e_del_l2_flow_rule(struc
 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 				  struct mlx5e_l2_rule *ai, int type)
 {
-	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+	struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
 	struct mlx5_flow_destination dest = {};
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_spec *spec;
@@ -940,7 +940,7 @@ static int mlx5e_add_l2_flow_rule(struct
 			       outer_headers.dmac_47_16);
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+	dest.ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
 
 	switch (type) {
 	case MLX5E_FULLMATCH:
@@ -1045,12 +1045,12 @@ err_destroy_groups:
 
 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
 {
-	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+	mlx5e_destroy_flow_table(&priv->fs->l2.ft);
 }
 
 static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+	struct mlx5e_l2_table *l2_table = &priv->fs->l2;
 	struct mlx5e_flow_table *ft = &l2_table->ft;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
@@ -1061,7 +1061,7 @@ static int mlx5e_create_l2_table(struct
 	ft_attr.level = MLX5E_L2_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
@@ -1187,14 +1187,14 @@ static int mlx5e_create_vlan_table(struc
 	struct mlx5e_flow_table *ft;
 	int err;
 
-	ft = &priv->fs.vlan->ft;
+	ft = &priv->fs->vlan->ft;
 	ft->num_groups = 0;
 
 	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
 	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
-	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+	ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
 	if (IS_ERR(ft->t))
 		return PTR_ERR(ft->t);
 
@@ -1223,19 +1223,19 @@ err_destroy_vlan_table:
 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
 	mlx5e_del_vlan_rules(priv);
-	mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+	mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
 }
 
 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
 {
 	if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
 		return;
-	mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+	mlx5_destroy_ttc_table(priv->fs->inner_ttc);
 }
 
 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
 {
-	mlx5_destroy_ttc_table(priv->fs.ttc);
+	mlx5_destroy_ttc_table(priv->fs->ttc);
 }
 
 static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1246,10 +1246,10 @@ static int mlx5e_create_inner_ttc_table(
 		return 0;
 
 	mlx5e_set_inner_ttc_params(priv, &ttc_params);
-	priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
-							 &ttc_params);
-	if (IS_ERR(priv->fs.inner_ttc))
-		return PTR_ERR(priv->fs.inner_ttc);
+	priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
+							  &ttc_params);
+	if (IS_ERR(priv->fs->inner_ttc))
+		return PTR_ERR(priv->fs->inner_ttc);
 	return 0;
 }
 
@@ -1258,9 +1258,9 @@ int mlx5e_create_ttc_table(struct mlx5e_
 	struct ttc_params ttc_params = {};
 
 	mlx5e_set_ttc_params(priv, &ttc_params, true);
-	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
-	if (IS_ERR(priv->fs.ttc))
-		return PTR_ERR(priv->fs.ttc);
+	priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+	if (IS_ERR(priv->fs->ttc))
+		return PTR_ERR(priv->fs->ttc);
 	return 0;
 }
 
@@ -1268,10 +1268,10 @@ int mlx5e_create_flow_steering(struct ml
 {
 	int err;
 
-	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+	priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
-	if (!priv->fs.ns)
+	if (!priv->fs->ns)
 		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
@@ -1369,31 +1369,39 @@ static void mlx5e_fs_tc_free(struct mlx5
 	mlx5e_tc_table_free(fs->tc);
 }
 
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile)
 {
+	struct mlx5e_flow_steering *fs;
 	int err;
 
-	if (mlx5e_profile_feature_cap(priv->profile, FS_VLAN)) {
-		err = mlx5e_fs_vlan_alloc(&priv->fs);
+	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+	if (!fs)
+		goto err;
+
+	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+		err = mlx5e_fs_vlan_alloc(fs);
 		if (err)
-			goto err;
+			goto err_free_fs;
 	}
 
-	if (mlx5e_profile_feature_cap(priv->profile, FS_TC)) {
-		err = mlx5e_fs_tc_alloc(&priv->fs);
+	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+		err = mlx5e_fs_tc_alloc(fs);
 		if (err)
 			goto err_free_vlan;
 	}
 
-	return 0;
+	return fs;
+err_free_fs:
+	kvfree(fs);
 err_free_vlan:
-	mlx5e_fs_vlan_free(&priv->fs);
+	mlx5e_fs_vlan_free(fs);
 err:
-	return -ENOMEM;
+	return NULL;
 }
 
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
 {
-	mlx5e_fs_tc_free(&priv->fs);
-	mlx5e_fs_vlan_free(&priv->fs);
+	mlx5e_fs_tc_free(fs);
+	mlx5e_fs_vlan_free(fs);
+	kvfree(fs);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_f
 	case UDP_V6_FLOW:
 		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
 		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+		eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
 		break;
 	case IP_USER_FLOW:
 	case IPV6_USER_FLOW:
 		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
 		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+		eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
 		break;
 	case ETHER_FLOW:
 		max_tuples = ETHTOOL_NUM_L2_FTS;
 		prio = max_tuples - num_tuples;
-		eth_ft = &priv->fs.ethtool.l2_ft[prio];
+		eth_ft = &priv->fs->ethtool.l2_ft[prio];
 		prio += MLX5E_ETHTOOL_L2_PRIO;
 		break;
 	default:
@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5
 			     struct mlx5e_ethtool_rule *rule)
 {
 	struct mlx5e_ethtool_rule *iter;
-	struct list_head *head = &priv->fs.ethtool.rules;
+	struct list_head *head = &priv->fs->ethtool.rules;
 
-	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+	list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
 		if (iter->flow_spec.location > rule->flow_spec.location)
 			break;
 		head = &iter->list;
 	}
-	priv->fs.ethtool.tot_num_rules++;
+	priv->fs->ethtool.tot_num_rules++;
 	list_add(&rule->list, head);
 }
 
@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5
 	if (eth_rule->rss)
 		mlx5e_rss_refcnt_dec(eth_rule->rss);
 	list_del(&eth_rule->list);
-	priv->fs.ethtool.tot_num_rules--;
+	priv->fs->ethtool.tot_num_rules--;
 	put_flow_table(eth_rule->eth_ft);
 	kfree(eth_rule);
 }
@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_e
 {
 	struct mlx5e_ethtool_rule *iter;
 
-	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+	list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
 		if (iter->flow_spec.location == location)
 			return iter;
 	}
@@ -788,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv
 	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
 		return -EINVAL;
 
-	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+	list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
 		int index;
 
 		if (eth_rule->flow_spec.location != location)
@@ -831,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(stru
 	struct mlx5e_ethtool_rule *iter;
 	struct mlx5e_ethtool_rule *temp;
 
-	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+	list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
 		del_ethtool_rule(priv, iter);
 }
 
 void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
 {
-	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+	INIT_LIST_HEAD(&priv->fs->ethtool.rules);
 }
 
 static int flow_type_to_traffic_type(u32 flow_type)
@@ -963,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e
 
 	switch (info->cmd) {
 	case ETHTOOL_GRXCLSRLCNT:
-		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+		info->rule_cnt = priv->fs->ethtool.tot_num_rules;
 		break;
 	case ETHTOOL_GRXCLSRULE:
 		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3889,8 +3889,8 @@ static netdev_features_t mlx5e_fix_featu
 
 	mutex_lock(&priv->state_lock);
 	params = &priv->channels.params;
-	if (!priv->fs.vlan ||
-	    !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+	if (!priv->fs->vlan ||
+	    !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
 		/* HW strips the outer C-tag header, this is a problem
 		 * for S-tag traffic.
 		 */
@@ -5012,6 +5012,7 @@ static int mlx5e_nic_init(struct mlx5_co
 			  struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_flow_steering *fs;
 	int err;
 
 	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5019,11 +5020,13 @@ static int mlx5e_nic_init(struct mlx5_co
 
 	mlx5e_timestamp_init(priv);
 
-	err = mlx5e_fs_init(priv);
-	if (err) {
+	fs = mlx5e_fs_init(priv->profile);
+	if (!fs) {
+		err = -ENOMEM;
 		mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
 		return err;
 	}
+	priv->fs = fs;
 
 	err = mlx5e_ipsec_init(priv);
 	if (err)
@@ -5042,7 +5045,7 @@ static void mlx5e_nic_cleanup(struct mlx
 	mlx5e_health_destroy_reporters(priv);
 	mlx5e_ktls_cleanup(priv);
 	mlx5e_ipsec_cleanup(priv);
-	mlx5e_fs_cleanup(priv);
+	mlx5e_fs_cleanup(priv->fs);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5
 
 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
 {
+	mlx5e_fs_cleanup(priv->fs);
 	mlx5e_ipsec_cleanup(priv);
 }
 
@@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(st
 	struct ttc_params ttc_params = {};
 	int err;
 
-	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
-					      MLX5_FLOW_NAMESPACE_KERNEL);
+	priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
+					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	/* The inner_ttc in the ttc params is intentionally not set */
 	mlx5e_set_ttc_params(priv, &ttc_params, false);
@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(st
 		/* To give uplik rep TTC a lower level for chaining from root ft */
 		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
 
-	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
-	if (IS_ERR(priv->fs.ttc)) {
-		err = PTR_ERR(priv->fs.ttc);
+	priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+	if (IS_ERR(priv->fs->ttc)) {
+		err = PTR_ERR(priv->fs->ttc);
 		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
 			   err);
 		return err;
@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(stru
 		/* non uplik reps will skip any bypass tables and go directly to
 		 * their own ttc
 		 */
-		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
 		return 0;
 	}
 
@@ -835,9 +836,17 @@ static int mlx5e_init_rep_rx(struct mlx5
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res)
+	priv->fs = mlx5e_fs_init(priv->profile);
+	if (!priv->fs) {
+		netdev_err(priv->netdev, "FS allocation failed\n");
 		return -ENOMEM;
+	}
+
+	priv->rx_res = mlx5e_rx_res_alloc();
+	if (!priv->rx_res) {
+		err = -ENOMEM;
+		goto err_free_fs;
+	}
 
 	mlx5e_init_l2_addr(priv);
 
@@ -873,13 +882,15 @@ static int mlx5e_init_rep_rx(struct mlx5
 err_destroy_root_ft:
 	mlx5e_destroy_rep_root_ft(priv);
 err_destroy_ttc_table:
-	mlx5_destroy_ttc_table(priv->fs.ttc);
+	mlx5_destroy_ttc_table(priv->fs->ttc);
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_rx_res_free(priv->rx_res);
 	priv->rx_res = NULL;
+err_free_fs:
+	mlx5e_fs_cleanup(priv->fs);
 	return err;
 }
 
@@ -888,7 +899,7 @@ static void mlx5e_cleanup_rep_rx(struct
 	mlx5e_ethtool_cleanup_steering(priv);
 	rep_vport_rx_rule_destroy(priv);
 	mlx5e_destroy_rep_root_ft(priv);
-	mlx5_destroy_ttc_table(priv->fs.ttc);
+	mlx5_destroy_ttc_table(priv->fs->ttc);
 	mlx5e_rx_res_destroy(priv->rx_res);
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_rx_res_free(priv->rx_res);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -322,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
 		return uplink_priv->ct_priv;
 	}
 
-	return priv->fs.tc->ct;
+	return priv->fs->tc->ct;
 }
 
 static struct mlx5e_tc_psample *
@@ -356,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
 		return uplink_priv->post_act;
 	}
 
-	return priv->fs.tc->post_act;
+	return priv->fs->tc->post_act;
 }
 
 struct mlx5_flow_handle *
@@ -611,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *pri
 
 	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
 		&esw->offloads.mod_hdr :
-		&priv->fs.tc->mod_hdr;
+		&priv->fs->tc->mod_hdr;
 }
 
 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -829,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct
 
 	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
 		   hp->num_channels,
-		   mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+		   mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
 
 	return 0;
 
@@ -919,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e
 	struct mlx5e_hairpin_entry *hpe;
 	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
 
-	hash_for_each_possible(priv->fs.tc->hairpin_tbl, hpe,
+	hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
 			       hairpin_hlist, hash_key) {
 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
 			refcount_inc(&hpe->refcnt);
@@ -934,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx
 			      struct mlx5e_hairpin_entry *hpe)
 {
 	/* no more hairpin flows for us, release the hairpin pair */
-	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc->hairpin_tbl_lock))
+	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
 		return;
 	hash_del(&hpe->hairpin_hlist);
-	mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+	mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
 
 	if (!IS_ERR_OR_NULL(hpe->hp)) {
 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -1021,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct
 	if (err)
 		return err;
 
-	mutex_lock(&priv->fs.tc->hairpin_tbl_lock);
+	mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
 	if (hpe) {
-		mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+		mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
 		wait_for_completion(&hpe->res_ready);
 
 		if (IS_ERR(hpe->hp)) {
@@ -1036,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct
 
 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
 	if (!hpe) {
-		mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+		mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
 		return -ENOMEM;
 	}
 
@@ -1048,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct
 	refcount_set(&hpe->refcnt, 1);
 	init_completion(&hpe->res_ready);
 
-	hash_add(priv->fs.tc->hairpin_tbl, &hpe->hairpin_hlist,
+	hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
 		 hash_hairpin_info(peer_id, match_prio));
-	mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+	mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
 
 	params.log_data_size = 16;
 	params.log_data_size = min_t(u8, params.log_data_size,
@@ -1127,7 +1127,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5
 {
 	struct mlx5_flow_context *flow_context = &spec->flow_context;
 	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
-	struct mlx5e_tc_table *tc = priv->fs.tc;
+	struct mlx5e_tc_table *tc = priv->fs->tc;
 	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_fs_chains *nic_chains;
 	struct mlx5_flow_act flow_act = {
@@ -1163,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5
 			if (IS_ERR(dest[dest_ix].ft))
 				return ERR_CAST(dest[dest_ix].ft);
 		} else {
-			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
 		}
 		dest_ix++;
 	}
@@ -1191,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5
 			mutex_unlock(&tc->t_lock);
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
-			rule = ERR_CAST(priv->fs.tc->t);
+			rule = ERR_CAST(priv->fs->tc->t);
 			goto err_ft_get;
 		}
 	}
@@ -1293,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct
 				  struct mlx5_flow_handle *rule,
 				  struct mlx5_flow_attr *attr)
 {
-	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs.tc);
+	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
 
 	mlx5_del_flow_rules(rule);
 
@@ -1310,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct
 				  struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_flow_attr *attr = flow->attr;
-	struct mlx5e_tc_table *tc = priv->fs.tc;
+	struct mlx5e_tc_table *tc = priv->fs->tc;
 
 	flow_flag_clear(flow, OFFLOADED);
 
@@ -1322,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct
 	/* Remove root table if no rules are left to avoid
 	 * extra steering hops.
 	 */
-	mutex_lock(&priv->fs.tc->t_lock);
+	mutex_lock(&priv->fs->tc->t_lock);
 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
 	    !IS_ERR_OR_NULL(tc->t)) {
 		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
-		priv->fs.tc->t = NULL;
+		priv->fs->tc->t = NULL;
 	}
-	mutex_unlock(&priv->fs.tc->t_lock);
+	mutex_unlock(&priv->fs->tc->t_lock);
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
@@ -4064,7 +4064,7 @@ static struct rhashtable *get_tc_ht(stru
 		rpriv = priv->ppriv;
 		return &rpriv->tc_ht;
 	} else /* NIC offload */
-		return &priv->fs.tc->ht;
+		return &priv->fs->tc->ht;
 }
 
 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4784,11 +4784,11 @@ static void mlx5e_tc_hairpin_update_dead
 
 	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
 
-	mutex_lock(&priv->fs.tc->hairpin_tbl_lock);
-	hash_for_each(priv->fs.tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+	mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+	hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
 		if (refcount_inc_not_zero(&hpe->refcnt))
 			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
-	mutex_unlock(&priv->fs.tc->hairpin_tbl_lock);
+	mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
 
 	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
 		wait_for_completion(&hpe->res_ready);
@@ -4842,7 +4842,7 @@ static int mlx5e_tc_nic_get_ft_size(stru
 
 static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
 {
-	struct mlx5_flow_table **ft = &priv->fs.tc->miss_t;
+	struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_namespace *ns;
 	int err = 0;
@@ -4864,12 +4864,12 @@ static int mlx5e_tc_nic_create_miss_tabl
 
 static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
 {
-	mlx5_destroy_flow_table(priv->fs.tc->miss_t);
+	mlx5_destroy_flow_table(priv->fs->tc->miss_t);
 }
 
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
-	struct mlx5e_tc_table *tc = priv->fs.tc;
+	struct mlx5e_tc_table *tc = priv->fs->tc;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mapping_ctx *chains_mapping;
 	struct mlx5_chains_attr attr = {};
@@ -4910,7 +4910,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv
 	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
 	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
 	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-	attr.default_ft = priv->fs.tc->miss_t;
+	attr.default_ft = priv->fs->tc->miss_t;
 	attr.mapping = chains_mapping;
 
 	tc->chains = mlx5_chains_create(dev, &attr);
@@ -4959,7 +4959,7 @@ static void _mlx5e_tc_del_flow(void *ptr
 
 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
-	struct mlx5e_tc_table *tc = priv->fs.tc;
+	struct mlx5e_tc_table *tc = priv->fs->tc;
 
 	if (tc->netdevice_nb.notifier_call)
 		unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -5164,7 +5164,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
 	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
 	struct mlx5e_priv *priv = netdev_priv(skb->dev);
-	struct mlx5e_tc_table *tc = priv->fs.tc;
+	struct mlx5e_tc_table *tc = priv->fs->tc;
 	struct mlx5_mapped_obj mapped_obj;
 	struct tc_skb_ext *tc_skb_ext;
 	int err;
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(st
 {
 	int err;
 
-	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+	priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
-	if (!priv->fs.ns)
+	if (!priv->fs->ns)
 		return -EINVAL;
 
 	err = mlx5e_arfs_create_tables(priv);
@@ -364,9 +364,17 @@ static int mlx5i_init_rx(struct mlx5e_pr
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res)
+	priv->fs = mlx5e_fs_init(priv->profile);
+	if (!priv->fs) {
+		netdev_err(priv->netdev, "FS allocation failed\n");
 		return -ENOMEM;
+	}
+
+	priv->rx_res = mlx5e_rx_res_alloc();
+	if (!priv->rx_res) {
+		err = -ENOMEM;
+		goto err_free_fs;
+	}
 
 	mlx5e_create_q_counters(priv);
 
@@ -397,6 +405,8 @@ err_destroy_q_counters:
 	mlx5e_destroy_q_counters(priv);
 	mlx5e_rx_res_free(priv->rx_res);
 	priv->rx_res = NULL;
+err_free_fs:
+	mlx5e_fs_cleanup(priv->fs);
 	return err;
 }
 
@@ -408,6 +418,7 @@ static void mlx5i_cleanup_rx(struct mlx5
 	mlx5e_destroy_q_counters(priv);
 	mlx5e_rx_res_free(priv->rx_res);
 	priv->rx_res = NULL;
+	mlx5e_fs_cleanup(priv->fs);
 }
 
 /* The stats groups order is opposite to the update_stats() order calls */