From: Petr Tesarik <ptesarik@suse.com>
Subject: net/smc: kABI workarounds for struct smc_link
Patch-mainline: Never, kABI workaround
References: git-fixes

Commit e9b1a4f867ae9c1dbd1d71cd09cbdb3239fb4968 adds two new fields,
wr_rx_id_compl and wr_rx_empty_wait, to struct smc_link, which breaks
kABI. The change cannot be masked by moving the new fields to the end
of the structure, because struct smc_link itself is embedded in
struct smc_link_group.
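
In outline (an illustrative sketch only, member lists abridged), the
embedding looks like this; any field appended to struct smc_link would
shift the offsets of every struct smc_link_group member that follows
the lnk[] array:

	struct smc_link {
		/* ... existing members; their offsets are part of the kABI ... */
	};

	struct smc_link_group {
		/* ... */
		struct smc_link		lnk[SMC_LINKS_PER_LGR_MAX];
						/* smc links */
		/* ... */
	};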

However, struct smc_link is never allocated outside of struct
smc_link_group, and it already carries both a backpointer to its link
group (lgr) and its own index in the lnk[] array (link_idx). The new
fields can therefore be moved into a parallel array in struct
smc_link_group and accessed through that backpointer and index.
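
Concretely (illustrative only; the hunks below are authoritative),
every access of the form

	lnk->wr_rx_id_compl

becomes

	lnk->lgr->lnk_kabi_fixup[lnk->link_idx].wr_rx_id_compl

and likewise for wr_rx_empty_wait.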

It is safe to enlarge struct smc_link_group, because it is only ever
allocated by smc_lgr_create() and never embedded in another data
structure.

Signed-off-by: Petr Tesarik <ptesarik@suse.com>
---
 net/smc/smc_core.c |    2 +-
 net/smc/smc_core.h |   12 ++++++++++--
 net/smc/smc_wr.c   |   10 ++++++----
 net/smc/smc_wr.h   |    5 ++++-
 4 files changed, 21 insertions(+), 8 deletions(-)

--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -750,7 +750,7 @@ int smcr_link_init(struct smc_link_group
 	lnk->link_id = smcr_next_link_id(lgr);
 	lnk->lgr = lgr;
 	lnk->link_idx = link_idx;
-	lnk->wr_rx_id_compl = 0;
+	lgr->lnk_kabi_fixup[link_idx].wr_rx_id_compl = 0;
 	smc_ibdev_cnt_inc(lnk);
 	smcr_copy_dev_info_to_link(lnk);
 	atomic_set(&lnk->conn_cnt, 0);
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -115,10 +115,8 @@ struct smc_link {
 	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
 	dma_addr_t		wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
 	u64			wr_rx_id;	/* seq # of last recv WR */
-	u64			wr_rx_id_compl; /* seq # of last completed WR */
 	u32			wr_rx_cnt;	/* number of WR recv buffers */
 	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */
-	wait_queue_head_t       wr_rx_empty_wait; /* wait for RQ empty */
 
 	struct ib_reg_wr	wr_reg;		/* WR register memory region */
 	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
@@ -151,6 +149,12 @@ struct smc_link {
 	atomic_t		conn_cnt; /* connections on this link */
 };
 
+/* Extra fields removed from struct smc_link to preserve kABI. */
+struct smc_link_kabi_fixup {
+	u64			wr_rx_id_compl; /* seq # of last completed WR */
+	wait_queue_head_t       wr_rx_empty_wait; /* wait for RQ empty */
+};
+
 /* For now we just allow one parallel link per link group. The SMC protocol
  * allows more (up to 8).
  */
@@ -308,6 +312,10 @@ struct smc_link_group {
 			u8			nexthop_mac[ETH_ALEN];
 			u8			uses_gateway;
 			__be32			saddr;
+#ifndef __GENKSYMS__
+			struct smc_link_kabi_fixup lnk_kabi_fixup[SMC_LINKS_PER_LGR_MAX];
+						/* extra lnk fields */
+#endif
 		};
 		struct { /* SMC-D */
 			u64			peer_gid;
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -454,11 +454,13 @@ static inline void smc_wr_rx_demultiplex
 static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
 {
 	struct smc_link *link;
+	struct smc_link_kabi_fixup *link_kabi_fixup;
 	int i;
 
 	for (i = 0; i < num; i++) {
 		link = wc[i].qp->qp_context;
-		link->wr_rx_id_compl = wc[i].wr_id;
+		link_kabi_fixup = &link->lgr->lnk_kabi_fixup[link->link_idx];
+		link_kabi_fixup->wr_rx_id_compl = wc[i].wr_id;
 		if (wc[i].status == IB_WC_SUCCESS) {
 			link->wr_rx_tstamp = jiffies;
 			smc_wr_rx_demultiplex(&wc[i]);
@@ -470,8 +472,8 @@ static inline void smc_wr_rx_process_cqe
 			case IB_WC_RNR_RETRY_EXC_ERR:
 			case IB_WC_WR_FLUSH_ERR:
 				smcr_link_down_cond_sched(link);
-				if (link->wr_rx_id_compl == link->wr_rx_id)
-					wake_up(&link->wr_rx_empty_wait);
+				if (link_kabi_fixup->wr_rx_id_compl == link->wr_rx_id)
+					wake_up(&link_kabi_fixup->wr_rx_empty_wait);
 				break;
 			default:
 				smc_wr_rx_post(link); /* refill WR RX */
@@ -897,7 +899,7 @@ int smc_wr_create_link(struct smc_link *
 	atomic_set(&lnk->wr_tx_refcnt, 0);
 	init_waitqueue_head(&lnk->wr_reg_wait);
 	atomic_set(&lnk->wr_reg_refcnt, 0);
-	init_waitqueue_head(&lnk->wr_rx_empty_wait);
+	init_waitqueue_head(&lnk->lgr->lnk_kabi_fixup[lnk->link_idx].wr_rx_empty_wait);
 	return rc;
 
 dma_unmap:
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -75,7 +75,10 @@ static inline void smc_wr_tx_link_put(st
 
 static inline void smc_wr_drain_cq(struct smc_link *lnk)
 {
-	wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id);
+	struct smc_link_kabi_fixup *lnk_fixup =
+		&lnk->lgr->lnk_kabi_fixup[lnk->link_idx];
+	wait_event(lnk_fixup->wr_rx_empty_wait,
+		   lnk_fixup->wr_rx_id_compl == lnk->wr_rx_id);
 }
 
 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)