From: Stefan Raspl <raspl@linux.ibm.com>
Subject: net/smc: eliminate cursor read and write calls
Patch-mainline: v4.19-rc1
Git-commit: bac6de7b637018f4caacfdf2b4ad8c8749de7420
References: FATE#325698, LTC#167867, bsc#1113481

Description:  smc: Latest upstream fixes and extensions up to 8/17/2018

Upstream-Description:

              net/smc: eliminate cursor read and write calls

              The functions to read and write cursors are exclusively used to copy
              cursors. Therefore switch to a respective function instead.

              Signed-off-by: Stefan Raspl <raspl@linux.ibm.com>
              Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
              Signed-off-by: David S. Miller <davem@davemloft.net>
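
A note on the pattern (not part of the upstream commit message): each converted
site used to copy a cursor as a read paired with a write, e.g.
smc_curs_write(&tgt, smc_curs_read(&src, conn), conn); after this patch the same
copy is a single smc_curs_copy(&tgt, &src, conn) call (or smc_curs_copy_net()
for wire-format cursors). The stand-alone user-space sketch below only
illustrates that call-pattern change; union demo_cursor is a simplified,
hypothetical stand-in for the kernel's union smc_host_cursor, and the
spinlock/atomic64 handling is omitted.

/* Build with: cc -Wall demo.c -o demo  (illustrative sketch, not kernel code) */
#include <stdint.h>
#include <stdio.h>

union demo_cursor {
	struct {
		uint16_t wrap;	/* wrap sequence number */
		uint32_t count;	/* byte offset within the buffer */
	} curs;
	uint64_t acurs;		/* the whole cursor viewed as one 64-bit value */
};

/* Old interface: a read returning a u64 plus a write storing one. */
static uint64_t demo_curs_read(const union demo_cursor *curs)
{
	return curs->acurs;
}

static void demo_curs_write(union demo_cursor *curs, uint64_t val)
{
	curs->acurs = val;
}

/* New interface: one helper that copies cursor src into tgt. */
static void demo_curs_copy(union demo_cursor *tgt,
			   const union demo_cursor *src)
{
	tgt->acurs = src->acurs;
}

int main(void)
{
	union demo_cursor cons = { .curs = { .wrap = 2, .count = 4096 } };
	union demo_cursor before, after;

	/* before the patch: a copy expressed as read + write */
	demo_curs_write(&before, demo_curs_read(&cons));

	/* after the patch: the same copy as a single call */
	demo_curs_copy(&after, &cons);

	printf("before: wrap=%u count=%u  after: wrap=%u count=%u\n",
	       (unsigned int)before.curs.wrap, (unsigned int)before.curs.count,
	       (unsigned int)after.curs.wrap, (unsigned int)after.curs.count);
	return 0;
}

In the kernel the new helpers additionally take the smc_connection so that, on
platforms without KERNEL_HAS_ATOMIC64, the copy can be serialized under
conn->acurs_lock, as can be seen in the smc_cdc.h hunk below.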

Signed-off-by: Stefan Raspl <raspl@linux.ibm.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 net/smc/af_smc.c  |    8 ++------
 net/smc/smc_cdc.c |   33 ++++++++++++---------------------
 net/smc/smc_cdc.h |   43 +++++++++++++++----------------------------
 net/smc/smc_rx.c  |   15 ++++-----------
 net/smc/smc_tx.c  |   46 +++++++++++++---------------------------------
 net/smc/smc_tx.h  |    4 ++--
 6 files changed, 48 insertions(+), 101 deletions(-)

--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1759,12 +1759,8 @@ static int smc_ioctl(struct socket *sock
 		    smc->sk.sk_state == SMC_CLOSED) {
 			answ = 0;
 		} else {
-			smc_curs_write(&cons,
-			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-				       conn);
-			smc_curs_write(&urg,
-				       smc_curs_read(&conn->urg_curs, conn),
-				       conn);
+			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
+			smc_curs_copy(&urg, &conn->urg_curs, conn);
 			answ = smc_curs_diff(conn->rmb_desc->len,
 					     &cons, &urg) == 1;
 		}
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -33,14 +33,15 @@ static void smc_cdc_tx_handler(struct sm
 			       enum ib_wc_status wc_status)
 {
 	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+	struct smc_connection *conn = cdcpend->conn;
 	struct smc_sock *smc;
 	int diff;
 
-	if (!cdcpend->conn)
+	if (!conn)
 		/* already dismissed */
 		return;
 
-	smc = container_of(cdcpend->conn, struct smc_sock, conn);
+	smc = container_of(conn, struct smc_sock, conn);
 	bh_lock_sock(&smc->sk);
 	if (!wc_status) {
 		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
@@ -51,9 +52,7 @@ static void smc_cdc_tx_handler(struct sm
 		atomic_add(diff, &cdcpend->conn->sndbuf_space);
 		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
 		smp_mb__after_atomic();
-		smc_curs_write(&cdcpend->conn->tx_curs_fin,
-			       smc_curs_read(&cdcpend->cursor, cdcpend->conn),
-			       cdcpend->conn);
+		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
 	}
 	smc_tx_sndbuf_nonfull(smc);
 	bh_unlock_sock(&smc->sk);
@@ -109,9 +108,8 @@ int smc_cdc_msg_send(struct smc_connecti
 			    &conn->local_tx_ctrl, conn);
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
 	if (!rc)
-		smc_curs_write(&conn->rx_curs_confirmed,
-			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-			       conn);
+		smc_curs_copy(&conn->rx_curs_confirmed,
+			      &conn->local_tx_ctrl.cons, conn);
 
 	return rc;
 }
@@ -193,8 +191,8 @@ int smcd_cdc_msg_send(struct smc_connect
 	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
 	if (rc)
 		return rc;
-	smc_curs_write(&conn->rx_curs_confirmed,
-		       smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn);
+	smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
+		      conn);
 	/* Calculate transmitted data and increment free send buffer space */
 	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
 			     &conn->tx_curs_sent);
@@ -203,8 +201,7 @@ int smcd_cdc_msg_send(struct smc_connect
 	atomic_add(diff, &conn->sndbuf_space);
 	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
 	smp_mb__after_atomic();
-	smc_curs_write(&conn->tx_curs_fin,
-		       smc_curs_read(&conn->tx_curs_sent, conn), conn);
+	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
 
 	smc_tx_sndbuf_nonfull(smc);
 	return rc;
@@ -224,9 +221,7 @@ static void smc_cdc_handle_urg_data_arri
 	char *base;
 
 	/* new data included urgent business */
-	smc_curs_write(&conn->urg_curs,
-		       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
-		       conn);
+	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
 	conn->urg_state = SMC_URG_VALID;
 	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
 		/* we'll skip the urgent byte, so don't account for it */
@@ -246,12 +241,8 @@ static void smc_cdc_msg_recv_action(stru
 	struct smc_connection *conn = &smc->conn;
 	int diff_cons, diff_prod;
 
-	smc_curs_write(&prod_old,
-		       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
-		       conn);
-	smc_curs_write(&cons_old,
-		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
-		       conn);
+	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
+	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
 	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);
 
 	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -103,47 +103,34 @@ static inline u64 smc_curs_read(union sm
 #endif
 }
 
-static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs,
-				    struct smc_connection *conn)
+/* Copy cursor src into tgt */
+static inline void smc_curs_copy(union smc_host_cursor *tgt,
+				 union smc_host_cursor *src,
+				 struct smc_connection *conn)
 {
 #ifndef KERNEL_HAS_ATOMIC64
 	unsigned long flags;
-	u64 ret;
 
 	spin_lock_irqsave(&conn->acurs_lock, flags);
-	ret = curs->acurs;
+	tgt->acurs = src->acurs;
 	spin_unlock_irqrestore(&conn->acurs_lock, flags);
-	return ret;
 #else
-	return atomic64_read(&curs->acurs);
+	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
 #endif
 }
 
-static inline void smc_curs_write(union smc_host_cursor *curs, u64 val,
-				  struct smc_connection *conn)
+static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
+				     union smc_cdc_cursor *src,
+				     struct smc_connection *conn)
 {
 #ifndef KERNEL_HAS_ATOMIC64
 	unsigned long flags;
 
 	spin_lock_irqsave(&conn->acurs_lock, flags);
-	curs->acurs = val;
+	tgt->acurs = src->acurs;
 	spin_unlock_irqrestore(&conn->acurs_lock, flags);
 #else
-	atomic64_set(&curs->acurs, val);
-#endif
-}
-
-static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val,
-				      struct smc_connection *conn)
-{
-#ifndef KERNEL_HAS_ATOMIC64
-	unsigned long flags;
-
-	spin_lock_irqsave(&conn->acurs_lock, flags);
-	curs->acurs = val;
-	spin_unlock_irqrestore(&conn->acurs_lock, flags);
-#else
-	atomic64_set(&curs->acurs, val);
+	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
 #endif
 }
 
@@ -178,7 +165,7 @@ static inline void smc_host_cursor_to_cd
 {
 	union smc_host_cursor temp;
 
-	smc_curs_write(&temp, smc_curs_read(local, conn), conn);
+	smc_curs_copy(&temp, local, conn);
 	peer->count = htonl(temp.count);
 	peer->wrap = htons(temp.wrap);
 	/* peer->reserved = htons(0); must be ensured by caller */
@@ -205,8 +192,8 @@ static inline void smc_cdc_cursor_to_hos
 	union smc_host_cursor temp, old;
 	union smc_cdc_cursor net;
 
-	smc_curs_write(&old, smc_curs_read(local, conn), conn);
-	smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn);
+	smc_curs_copy(&old, local, conn);
+	smc_curs_copy_net(&net, peer, conn);
 	temp.count = ntohl(net.count);
 	temp.wrap = ntohs(net.wrap);
 	if ((old.wrap > temp.wrap) && temp.wrap)
@@ -214,7 +201,7 @@ static inline void smc_cdc_cursor_to_hos
 	if ((old.wrap == temp.wrap) &&
 	    (old.count > temp.count))
 		return;
-	smc_curs_write(local, smc_curs_read(&temp, conn), conn);
+	smc_curs_copy(local, &temp, conn);
 }
 
 static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -81,8 +81,7 @@ static int smc_rx_update_consumer(struct
 		}
 	}
 
-	smc_curs_write(&conn->local_tx_ctrl.cons, smc_curs_read(&cons, conn),
-		       conn);
+	smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);
 
 	/* send consumer cursor update if required */
 	/* similar to advertising new TCP rcv_wnd if required */
@@ -96,8 +95,7 @@ static void smc_rx_update_cons(struct sm
 	struct smc_connection *conn = &smc->conn;
 	union smc_host_cursor cons;
 
-	smc_curs_write(&cons, smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-		       conn);
+	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
 	smc_rx_update_consumer(smc, cons, len);
 }
 
@@ -244,10 +242,7 @@ static int smc_rx_recv_urg(struct smc_so
 			if (!(flags & MSG_TRUNC))
 				rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
 			len = 1;
-			smc_curs_write(&cons,
-				       smc_curs_read(&conn->local_tx_ctrl.cons,
-						     conn),
-				       conn);
+			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
 			if (smc_curs_diff(conn->rmb_desc->len, &cons,
 					  &conn->urg_curs) > 1)
 				conn->urg_rx_skip_pend = true;
@@ -369,9 +364,7 @@ copy:
 			continue;
 		}
 
-		smc_curs_write(&cons,
-			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-			       conn);
+		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
 		/* subsequent splice() calls pick up where previous left */
 		if (splbytes)
 			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -180,9 +180,7 @@ int smc_tx_sendmsg(struct smc_sock *smc,
 		copylen = min_t(size_t, send_remaining, writespace);
 		/* determine start of sndbuf */
 		sndbuf_base = conn->sndbuf_desc->cpu_addr;
-		smc_curs_write(&prep,
-			       smc_curs_read(&conn->tx_curs_prep, conn),
-			       conn);
+		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
 		tx_cnt_prep = prep.count;
 		/* determine chunks where to write into sndbuf */
 		/* either unwrapped case, or 1st chunk of wrapped case */
@@ -213,9 +211,7 @@ int smc_tx_sendmsg(struct smc_sock *smc,
 		smc_sndbuf_sync_sg_for_device(conn);
 		/* update cursors */
 		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
-		smc_curs_write(&conn->tx_curs_prep,
-			       smc_curs_read(&prep, conn),
-			       conn);
+		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
 		/* increased in send tasklet smc_cdc_tx_handler() */
 		smp_mb__before_atomic();
 		atomic_sub(copylen, &conn->sndbuf_space);
@@ -416,8 +412,8 @@ static int smc_tx_rdma_writes(struct smc
 	int rc;
 
 	/* source: sndbuf */
-	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
-	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
+	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
 	/* cf. wmem_alloc - (snd_max - snd_una) */
 	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
 	if (to_send <= 0)
@@ -428,12 +424,8 @@ static int smc_tx_rdma_writes(struct smc
 	rmbespace = atomic_read(&conn->peer_rmbe_space);
 	if (rmbespace <= 0)
 		return 0;
-	smc_curs_write(&prod,
-		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
-		       conn);
-	smc_curs_write(&cons,
-		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
-		       conn);
+	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
+	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
 
 	/* if usable snd_wnd closes ask peer to advertise once it opens again */
 	pflags = &conn->local_tx_ctrl.prod_flags;
@@ -480,14 +472,9 @@ static int smc_tx_rdma_writes(struct smc
 		pflags->urg_data_present = 1;
 	smc_tx_advance_cursors(conn, &prod, &sent, len);
 	/* update connection's cursors with advanced local cursors */
-	smc_curs_write(&conn->local_tx_ctrl.prod,
-		       smc_curs_read(&prod, conn),
-		       conn);
+	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
 							/* dst: peer RMBE */
-	smc_curs_write(&conn->tx_curs_sent,
-		       smc_curs_read(&sent, conn),
-		       conn);
-							/* src: local sndbuf */
+	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */
 
 	return 0;
 }
@@ -605,17 +592,11 @@ void smc_tx_consumer_update(struct smc_c
 	int sender_free = conn->rmb_desc->len;
 	int to_confirm;
 
-	smc_curs_write(&cons,
-		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-		       conn);
-	smc_curs_write(&cfed,
-		       smc_curs_read(&conn->rx_curs_confirmed, conn),
-		       conn);
+	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
+	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
 	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
 	if (to_confirm > conn->rmbe_update_limit) {
-		smc_curs_write(&prod,
-			       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
-			       conn);
+		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
 		sender_free = conn->rmb_desc->len -
 			      smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
 	}
@@ -631,9 +612,8 @@ void smc_tx_consumer_update(struct smc_c
 					      SMC_TX_WORK_DELAY);
 			return;
 		}
-		smc_curs_write(&conn->rx_curs_confirmed,
-			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
-			       conn);
+		smc_curs_copy(&conn->rx_curs_confirmed,
+			      &conn->local_tx_ctrl.cons, conn);
 		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
 	}
 	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
--- a/net/smc/smc_tx.h
+++ b/net/smc/smc_tx.h
@@ -21,8 +21,8 @@ static inline int smc_tx_prepared_sends(
 {
 	union smc_host_cursor sent, prep;
 
-	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
-	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
+	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
 	return smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
 }