From: Petr Machata <petrm@mellanox.com>
Date: Thu, 20 Sep 2018 09:21:25 +0300
Subject: mlxsw: spectrum_buffers: Use devlink pool indices throughout
Patch-mainline: v4.20-rc1
Git-commit: 3a4dbfb044ea37e9cabb1e2499859e423a96ab15
References: bsc#1112374

Currently, mlxsw assumes that each ingress pool has its egress
counterpart, and that pool index for purposes of caching matches the
index with which the hardware should be configured. As we want to expose
the MC pool, both of these assumptions break.

Instead, maintain the pool index as long as possible. Unify ingress and
egress caches and use the pool index as cache index as well. Only
translate to FW pool numbering when actually packing the registers. This
simplifies things considerably, as the pool index is the only quantity
necessary to uniquely identify a pool, and the pool/direction split is
not necessary until firmware is talked to.

To support the mapping between pool indices and pool numbers and
directions, which is not neatly mathematical anymore, introduce a pool
descriptor table, indexed by pool index, to facilitate the translation.

Include the MC pool in the descriptor table as well, so that it can be
referenced from mlxsw_sp_sb_cms_egress.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c |  375 +++++++----------
 1 file changed, 170 insertions(+), 205 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -25,7 +25,7 @@ struct mlxsw_cp_sb_occ {
 struct mlxsw_sp_sb_cm {
 	u32 min_buff;
 	u32 max_buff;
-	u8 pool;
+	u16 pool_index;
 	struct mlxsw_cp_sb_occ occ;
 };
 
@@ -35,16 +35,35 @@ struct mlxsw_sp_sb_pm {
 	struct mlxsw_cp_sb_occ occ;
 };
 
-#define MLXSW_SP_SB_POOL_COUNT	4
+struct mlxsw_sp_sb_pool_des {
+	enum mlxsw_reg_sbxx_dir dir;
+	u8 pool;
+};
+
+/* Order ingress pools before egress pools. */
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
+	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
+	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
+	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
+	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
+};
+
+#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)
+
 #define MLXSW_SP_SB_TC_COUNT	8
 
 struct mlxsw_sp_sb_port {
 	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
-	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+	struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
 };
 
 struct mlxsw_sp_sb {
-	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+	struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
 	struct mlxsw_sp_sb_port *ports;
 	u32 cell_size;
 };
@@ -60,10 +79,9 @@ u32 mlxsw_sp_bytes_cells(const struct ml
 }
 
 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
-						 u8 pool,
-						 enum mlxsw_reg_sbxx_dir dir)
+						 u16 pool_index)
 {
-	return &mlxsw_sp->sb->prs[dir][pool];
+	return &mlxsw_sp->sb->prs[pool_index];
 }
 
 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
@@ -74,81 +92,87 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_s
 }
 
 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
-						 u8 local_port, u8 pool,
-						 enum mlxsw_reg_sbxx_dir dir)
+						 u8 local_port, u16 pool_index)
 {
-	return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
+	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
 }
 
-static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
-				enum mlxsw_reg_sbxx_dir dir,
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
 				enum mlxsw_reg_sbpr_mode mode, u32 size)
 {
+	const struct mlxsw_sp_sb_pool_des *des =
+		&mlxsw_sp_sb_pool_dess[pool_index];
 	char sbpr_pl[MLXSW_REG_SBPR_LEN];
 	struct mlxsw_sp_sb_pr *pr;
 	int err;
 
-	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode, size);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
 	if (err)
 		return err;
 
-	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
 	pr->mode = mode;
 	pr->size = size;
 	return 0;
 }
 
 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
-				u32 min_buff, u32 max_buff, u8 pool)
+				u8 pg_buff, u32 min_buff, u32 max_buff,
+				u16 pool_index)
 {
+	const struct mlxsw_sp_sb_pool_des *des =
+		&mlxsw_sp_sb_pool_dess[pool_index];
 	char sbcm_pl[MLXSW_REG_SBCM_LEN];
 	int err;
 
-	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
-			    min_buff, max_buff, pool);
+	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
+			    min_buff, max_buff, des->pool);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
 	if (err)
 		return err;
 	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
 		struct mlxsw_sp_sb_cm *cm;
 
-		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
+					des->dir);
 		cm->min_buff = min_buff;
 		cm->max_buff = max_buff;
-		cm->pool = pool;
+		cm->pool_index = pool_index;
 	}
 	return 0;
 }
 
 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				u8 pool, enum mlxsw_reg_sbxx_dir dir,
-				u32 min_buff, u32 max_buff)
+				u16 pool_index, u32 min_buff, u32 max_buff)
 {
+	const struct mlxsw_sp_sb_pool_des *des =
+		&mlxsw_sp_sb_pool_dess[pool_index];
 	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 	struct mlxsw_sp_sb_pm *pm;
 	int err;
 
-	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
 			    min_buff, max_buff);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
 	if (err)
 		return err;
 
-	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
 	pm->min_buff = min_buff;
 	pm->max_buff = max_buff;
 	return 0;
 }
 
 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
-				    struct list_head *bulk_list)
+				    u16 pool_index, struct list_head *bulk_list)
 {
+	const struct mlxsw_sp_sb_pool_des *des =
+		&mlxsw_sp_sb_pool_dess[pool_index];
 	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 
-	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+			    true, 0, 0);
 	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
 				     bulk_list, NULL, 0);
 }
@@ -163,14 +187,16 @@ static void mlxsw_sp_sb_pm_occ_query_cb(
 }
 
 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
-				    struct list_head *bulk_list)
+				    u16 pool_index, struct list_head *bulk_list)
 {
+	const struct mlxsw_sp_sb_pool_des *des =
+		&mlxsw_sp_sb_pool_dess[pool_index];
 	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 	struct mlxsw_sp_sb_pm *pm;
 
-	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
-	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+			    false, 0, 0);
 	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
 				     bulk_list,
 				     mlxsw_sp_sb_pm_occ_query_cb,
@@ -254,30 +280,26 @@ static void mlxsw_sp_sb_ports_fini(struc
 		.size = _size,		\
 	}
 
-static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
+	/* Ingress pools. */
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
 		       MLXSW_SP_SB_PR_INGRESS_SIZE),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
 		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
-};
-
-#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
-
-static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+	/* Egress pools. */
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
 };
 
-#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)
 
-static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
-				  enum mlxsw_reg_sbxx_dir dir,
-				  const struct mlxsw_sp_sb_pr *prs,
-				  size_t prs_len)
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+				const struct mlxsw_sp_sb_pr *prs,
+				size_t prs_len)
 {
 	int i;
 	int err;
@@ -285,32 +307,18 @@ static int __mlxsw_sp_sb_prs_init(struct
 	for (i = 0; i < prs_len; i++) {
 		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
 
-		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
+		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, size);
 		if (err)
 			return err;
 	}
 	return 0;
 }
 
-static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
-{
-	int err;
-
-	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
-				     mlxsw_sp_sb_prs_ingress,
-				     MLXSW_SP_SB_PRS_INGRESS_LEN);
-	if (err)
-		return err;
-	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
-				      mlxsw_sp_sb_prs_egress,
-				      MLXSW_SP_SB_PRS_EGRESS_LEN);
-}
-
 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
 	{						\
 		.min_buff = _min_buff,			\
 		.max_buff = _max_buff,			\
-		.pool = _pool,				\
+		.pool_index = _pool,			\
 	}
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
@@ -329,38 +337,38 @@ static const struct mlxsw_sp_sb_cm mlxsw
 #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(0, 140000, 15),
-	MLXSW_SP_SB_CM(1, 0xff, 0),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(1500, 9, 4),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(0, 140000, 8),
+	MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
 #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
 
-#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
@@ -405,12 +413,16 @@ static int __mlxsw_sp_sb_cms_init(struct
 		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
 			continue; /* PG number 8 does not exist, skip it */
 		cm = &cms[i];
+		if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
+			continue;
+
 		/* All pools are initialized using dynamic thresholds,
 		 * therefore 'max_buff' isn't specified in cells.
 		 */
 		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
-		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
-					   min_buff, cm->max_buff, cm->pool);
+		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
+					   min_buff, cm->max_buff,
+					   cm->pool_index);
 		if (err)
 			return err;
 	}
@@ -448,91 +460,67 @@ static int mlxsw_sp_cpu_port_sb_cms_init
 		.max_buff = _max_buff,		\
 	}
 
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+	/* Ingress pools. */
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
-};
-
-#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+	/* Egress pools. */
 	MLXSW_SP_SB_PM(0, 7),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
 };
 
-#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
 
-static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				       enum mlxsw_reg_sbxx_dir dir,
-				       const struct mlxsw_sp_sb_pm *pms,
-				       size_t pms_len)
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	int i;
 	int err;
 
-	for (i = 0; i < pms_len; i++) {
-		const struct mlxsw_sp_sb_pm *pm;
+	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
 
-		pm = &pms[i];
-		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
-					   pm->min_buff, pm->max_buff);
+		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
+					   i, pm->min_buff, pm->max_buff);
 		if (err)
 			return err;
 	}
 	return 0;
 }
 
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
-	int err;
-
-	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
-					  mlxsw_sp_port->local_port,
-					  MLXSW_REG_SBXX_DIR_INGRESS,
-					  mlxsw_sp_sb_pms_ingress,
-					  MLXSW_SP_SB_PMS_INGRESS_LEN);
-	if (err)
-		return err;
-	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
-					   mlxsw_sp_port->local_port,
-					   MLXSW_REG_SBXX_DIR_EGRESS,
-					   mlxsw_sp_sb_pms_egress,
-					   MLXSW_SP_SB_PMS_EGRESS_LEN);
-}
-
 struct mlxsw_sp_sb_mm {
 	u32 min_buff;
 	u32 max_buff;
-	u8 pool;
+	u16 pool_index;
 };
 
 #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
 	{						\
 		.min_buff = _min_buff,			\
 		.max_buff = _max_buff,			\
-		.pool = _pool,				\
+		.pool_index = _pool,			\
 	}
 
 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
-	MLXSW_SP_SB_MM(20000, 0xff, 0),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
+	MLXSW_SP_SB_MM(20000, 0xff, 4),
 };
 
 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -544,16 +532,18 @@ static int mlxsw_sp_sb_mms_init(struct m
 	int err;
 
 	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+		const struct mlxsw_sp_sb_pool_des *des;
 		const struct mlxsw_sp_sb_mm *mc;
 		u32 min_buff;
 
 		mc = &mlxsw_sp_sb_mms[i];
+		des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
 		/* All pools are initialized using dynamic thresholds,
 		 * therefore 'max_buff' isn't specified in cells.
 		 */
 		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
 		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
-				    mc->pool);
+				    des->pool);
 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
 		if (err)
 			return err;
@@ -561,8 +551,24 @@ static int mlxsw_sp_sb_mms_init(struct m
 	return 0;
 }
 
+static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
+{
+	int i;
+
+	for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i)
+		if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS)
+			goto out;
+	WARN(1, "No egress pools\n");
+
+out:
+	*p_ingress_len = i;
+	*p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
+}
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
+	u16 ing_pool_count;
+	u16 eg_pool_count;
 	u64 sb_size;
 	int err;
 
@@ -581,7 +587,8 @@ int mlxsw_sp_buffers_init(struct mlxsw_s
 	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
 	if (err)
 		goto err_sb_ports_init;
-	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
+				   MLXSW_SP_SB_PRS_LEN);
 	if (err)
 		goto err_sb_prs_init;
 	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
@@ -590,9 +597,10 @@ int mlxsw_sp_buffers_init(struct mlxsw_s
 	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
 	if (err)
 		goto err_sb_mms_init;
+	mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
 	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
-				  MLXSW_SP_SB_POOL_COUNT,
-				  MLXSW_SP_SB_POOL_COUNT,
+				  ing_pool_count,
+				  eg_pool_count,
 				  MLXSW_SP_SB_TC_COUNT,
 				  MLXSW_SP_SB_TC_COUNT);
 	if (err)
@@ -632,36 +640,15 @@ int mlxsw_sp_port_buffers_init(struct ml
 	return err;
 }
 
-static u8 pool_get(u16 pool_index)
-{
-	return pool_index % MLXSW_SP_SB_POOL_COUNT;
-}
-
-static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
-{
-	u16 pool_index;
-
-	pool_index = pool;
-	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
-		pool_index += MLXSW_SP_SB_POOL_COUNT;
-	return pool_index;
-}
-
-static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
-{
-	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
-	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
-}
-
 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
 			 unsigned int sb_index, u16 pool_index,
 			 struct devlink_sb_pool_info *pool_info)
 {
+	enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-	u8 pool = pool_get(pool_index);
-	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
-	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+	struct mlxsw_sp_sb_pr *pr;
 
+	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
 	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
 	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
 	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
@@ -674,34 +661,31 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_co
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
-	u8 pool = pool_get(pool_index);
-	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
 	enum mlxsw_reg_sbpr_mode mode;
 
 	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
 		return -EINVAL;
 
 	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
-	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode, pool_size);
 }
 
 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
 
-static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
-				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+				     u32 max_buff)
 {
-	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
 
 	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
 		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
 	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
 }
 
-static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
-				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
-				    u32 *p_max_buff)
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+				    u32 threshold, u32 *p_max_buff)
 {
-	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
 
 	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
 		int val;
@@ -725,12 +709,10 @@ int mlxsw_sp_sb_port_pool_get(struct mlx
 			mlxsw_core_port_driver_priv(mlxsw_core_port);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	u8 local_port = mlxsw_sp_port->local_port;
-	u8 pool = pool_get(pool_index);
-	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
-						       pool, dir);
+						       pool_index);
 
-	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
 						 pm->max_buff);
 	return 0;
 }
@@ -743,17 +725,15 @@ int mlxsw_sp_sb_port_pool_set(struct mlx
 			mlxsw_core_port_driver_priv(mlxsw_core_port);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	u8 local_port = mlxsw_sp_port->local_port;
-	u8 pool = pool_get(pool_index);
-	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
 	u32 max_buff;
 	int err;
 
-	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
 				       threshold, &max_buff);
 	if (err)
 		return err;
 
-	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
 				    0, max_buff);
 }
 
@@ -771,9 +751,9 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct
 	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
 						       pg_buff, dir);
 
-	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
 						 cm->max_buff);
-	*p_pool_index = pool_index_get(cm->pool, dir);
+	*p_pool_index = cm->pool_index;
 	return 0;
 }
 
@@ -788,20 +768,19 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct
 	u8 local_port = mlxsw_sp_port->local_port;
 	u8 pg_buff = tc_index;
 	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
-	u8 pool = pool_get(pool_index);
 	u32 max_buff;
 	int err;
 
-	if (dir != dir_get(pool_index))
+	if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
 		return -EINVAL;
 
-	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
 				       threshold, &max_buff);
 	if (err)
 		return err;
 
-	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
-				    0, max_buff, pool);
+	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
+				    0, max_buff, pool_index);
 }
 
 #define MASKED_COUNT_MAX \
@@ -889,14 +868,8 @@ next_batch:
 			continue;
 		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
 		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
-		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
 			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
-						       MLXSW_REG_SBXX_DIR_INGRESS,
-						       &bulk_list);
-			if (err)
-				goto out;
-			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
-						       MLXSW_REG_SBXX_DIR_EGRESS,
 						       &bulk_list);
 			if (err)
 				goto out;
@@ -954,14 +927,8 @@ next_batch:
 			continue;
 		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
 		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
-		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
-			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
-						       MLXSW_REG_SBXX_DIR_INGRESS,
-						       &bulk_list);
-			if (err)
-				goto out;
+		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
 			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
-						       MLXSW_REG_SBXX_DIR_EGRESS,
 						       &bulk_list);
 			if (err)
 				goto out;
@@ -994,10 +961,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct
 			mlxsw_core_port_driver_priv(mlxsw_core_port);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	u8 local_port = mlxsw_sp_port->local_port;
-	u8 pool = pool_get(pool_index);
-	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
 	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
-						       pool, dir);
+						       pool_index);
 
 	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
 	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);