From: Petr Tesarik <ptesarik@suse.com>
Subject: kABI: protect struct smc_link
Patch-mainline: never, kabi
References: bsc#1129861 LTC#176250

Upstream commit ad6f317f720f4a3121756c23831a43dda9b095e5 adds new
fields to struct smc_link. However, this structure is embedded
inside struct smc_link_group, so it cannot be resized without
shifting the fields that follow it in that structure, which would
break the kernel ABI.

Thankfully, there is a one-to-one correspondence between a link and
its link group, because there is currently only one link per group.
The extra fields can therefore be added at the end of struct
smc_link_group, which is allocated dynamically and whose size should
never be needed by another module.
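
For reference, every user in smc_wr.c now reaches the relocated
fields through the owning link group. A minimal sketch of that
lookup (the helper name is illustrative only and not part of the
patch; the hunks below open-code it at each call site):

	/* The relocated fields live at the end of the link group,
	 * one entry per link slot; SMC_SINGLE_LINK is the only slot
	 * as long as one link per group is allowed.
	 */
	static inline struct smc_link_extra *
	smc_link_extra(struct smc_link *lnk)
	{
		return &smc_get_lgr(lnk)->lnk_extra[SMC_SINGLE_LINK];
	}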

Signed-off-by: Petr Tesarik <ptesarik@suse.com>
---
 net/smc/smc_core.h |   11 ++++++++--
 net/smc/smc_wr.c   |   56 +++++++++++++++++++++++++++++++----------------------
 2 files changed, 42 insertions(+), 25 deletions(-)

--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -81,8 +81,6 @@ struct smc_link {
 	struct smc_wr_buf	*wr_tx_bufs;	/* WR send payload buffers */
 	struct ib_send_wr	*wr_tx_ibs;	/* WR send meta data */
 	struct ib_sge		*wr_tx_sges;	/* WR send gather meta data */
-	struct smc_rdma_sges	*wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
-	struct smc_rdma_wr	*wr_tx_rdmas;	/* WR RDMA WRITE */
 	struct smc_wr_tx_pend	*wr_tx_pends;	/* WR send waiting for CQE */
 	/* above four vectors have wr_tx_cnt elements and use the same index */
 	dma_addr_t		wr_tx_dma_addr;	/* DMA address of wr_tx_bufs */
@@ -133,6 +131,11 @@ struct smc_link {
 	struct mutex		llc_delete_rkey_mutex; /* serialize usage */
 };
 
+struct smc_link_extra {
+	struct smc_rdma_sges	*wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
+	struct smc_rdma_wr	*wr_tx_rdmas;	/* WR RDMA WRITE */
+};
+
 /* For now we just allow one parallel link per link group. The SMC protocol
  * allows more (up to 8).
  */
@@ -226,6 +229,10 @@ struct smc_link_group {
 						/* ISM device for VLAN reg. */
 		};
 	};
+#ifndef __GENKSYMS__
+	struct smc_link_extra	lnk_extra[SMC_LINKS_PER_LGR_MAX];
+				/* smc_link fields added after kABI freeze */
+#endif
 };
 
 /* Find the connection associated with the given alert token in the link group.
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -170,6 +170,8 @@ int smc_wr_tx_get_free_slot(struct smc_l
 			    struct smc_rdma_wr **wr_rdma_buf,
 			    struct smc_wr_tx_pend_priv **wr_pend_priv)
 {
+	struct smc_link_group *lgr = smc_get_lgr(link);
+	struct smc_link_extra *extra = &lgr->lnk_extra[SMC_SINGLE_LINK];
 	struct smc_wr_tx_pend *wr_pend;
 	u32 idx = link->wr_tx_cnt;
 	struct ib_send_wr *wr_ib;
@@ -206,7 +208,7 @@ int smc_wr_tx_get_free_slot(struct smc_l
 	wr_ib->wr_id = wr_id;
 	*wr_buf = &link->wr_tx_bufs[idx];
 	if (wr_rdma_buf)
-		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
+		*wr_rdma_buf = &extra->wr_tx_rdmas[idx];
 	*wr_pend_priv = &wr_pend->priv;
 	return 0;
 }
@@ -461,6 +463,8 @@ void smc_wr_remember_qp_attr(struct smc_
 
 static void smc_wr_init_sge(struct smc_link *lnk)
 {
+	struct smc_link_group *lgr = smc_get_lgr(lnk);
+	struct smc_link_extra *extra = &lgr->lnk_extra[SMC_SINGLE_LINK];
 	u32 i;
 
 	for (i = 0; i < lnk->wr_tx_cnt; i++) {
@@ -468,13 +472,13 @@ static void smc_wr_init_sge(struct smc_l
 			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
 		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
 		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
-		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
+		extra->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
 			lnk->roce_pd->local_dma_lkey;
-		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
+		extra->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
 			lnk->roce_pd->local_dma_lkey;
-		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
+		extra->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
 			lnk->roce_pd->local_dma_lkey;
-		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
+		extra->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
 			lnk->roce_pd->local_dma_lkey;
 		lnk->wr_tx_ibs[i].next = NULL;
 		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
@@ -482,12 +486,12 @@ static void smc_wr_init_sge(struct smc_l
 		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
 		lnk->wr_tx_ibs[i].send_flags =
 			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
-		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
-		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
-		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
-			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
-		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
-			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
+		extra->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
+		extra->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
+		extra->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
+			extra->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
+		extra->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
+			extra->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
 	}
 	for (i = 0; i < lnk->wr_rx_cnt; i++) {
 		lnk->wr_rx_sges[i].addr =
@@ -532,18 +536,21 @@ void smc_wr_free_link(struct smc_link *l
 
 void smc_wr_free_link_mem(struct smc_link *lnk)
 {
+	struct smc_link_group *lgr = smc_get_lgr(lnk);
+	struct smc_link_extra *extra = &lgr->lnk_extra[SMC_SINGLE_LINK];
+
 	kfree(lnk->wr_tx_pends);
 	lnk->wr_tx_pends = NULL;
 	kfree(lnk->wr_tx_mask);
 	lnk->wr_tx_mask = NULL;
 	kfree(lnk->wr_tx_sges);
 	lnk->wr_tx_sges = NULL;
-	kfree(lnk->wr_tx_rdma_sges);
-	lnk->wr_tx_rdma_sges = NULL;
+	kfree(extra->wr_tx_rdma_sges);
+	extra->wr_tx_rdma_sges = NULL;
 	kfree(lnk->wr_rx_sges);
 	lnk->wr_rx_sges = NULL;
-	kfree(lnk->wr_tx_rdmas);
-	lnk->wr_tx_rdmas = NULL;
+	kfree(extra->wr_tx_rdmas);
+	extra->wr_tx_rdmas = NULL;
 	kfree(lnk->wr_rx_ibs);
 	lnk->wr_rx_ibs = NULL;
 	kfree(lnk->wr_tx_ibs);
@@ -556,6 +563,9 @@ void smc_wr_free_link_mem(struct smc_lin
 
 int smc_wr_alloc_link_mem(struct smc_link *link)
 {
+	struct smc_link_group *lgr = smc_get_lgr(link);
+	struct smc_link_extra *extra = &lgr->lnk_extra[SMC_SINGLE_LINK];
+
 	/* allocate link related memory */
 	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
 	if (!link->wr_tx_bufs)
@@ -573,15 +583,15 @@ int smc_wr_alloc_link_mem(struct smc_lin
 				  GFP_KERNEL);
 	if (!link->wr_rx_ibs)
 		goto no_mem_wr_tx_ibs;
-	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
-				    sizeof(link->wr_tx_rdmas[0]),
+	extra->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
+				    sizeof(extra->wr_tx_rdmas[0]),
 				    GFP_KERNEL);
-	if (!link->wr_tx_rdmas)
+	if (!extra->wr_tx_rdmas)
 		goto no_mem_wr_rx_ibs;
-	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
-					sizeof(link->wr_tx_rdma_sges[0]),
+	extra->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
+					sizeof(extra->wr_tx_rdma_sges[0]),
 					GFP_KERNEL);
-	if (!link->wr_tx_rdma_sges)
+	if (!extra->wr_tx_rdma_sges)
 		goto no_mem_wr_tx_rdmas;
 	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
 				   GFP_KERNEL);
@@ -611,9 +621,9 @@ no_mem_wr_rx_sges:
 no_mem_wr_tx_sges:
 	kfree(link->wr_tx_sges);
 no_mem_wr_tx_rdma_sges:
-	kfree(link->wr_tx_rdma_sges);
+	kfree(extra->wr_tx_rdma_sges);
 no_mem_wr_tx_rdmas:
-	kfree(link->wr_tx_rdmas);
+	kfree(extra->wr_tx_rdmas);
 no_mem_wr_rx_ibs:
 	kfree(link->wr_rx_ibs);
 no_mem_wr_tx_ibs: