From: John Fastabend <john.fastabend@gmail.com>
Date: Sun, 18 Mar 2018 12:57:05 -0700
Subject: net: generalize sk_alloc_sg to work with scatterlist rings
Patch-mainline: v4.17-rc1
Git-commit: 8c05dbf04b2882c3c0bc43fe7668c720210877f3
References: bsc#1103990 FATE#326006

The current implementation of sk_alloc_sg expects the scatterlist to always
start at entry 0 and end at entry MAX_SKB_FRAGS.

Future patches will want to support starting at an arbitrary offset into the
scatterlist, so add an additional sg_start parameter and default to the
current values in the TLS code paths.
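
For reference, a minimal sketch (not part of this patch) of how a caller
that keeps its scatterlist entries in a ring might drive the new signature.
The sg_ring structure, its field names and sg_ring_alloc() below are
hypothetical and exist only for illustration:

  #include <net/sock.h>
  #include <linux/scatterlist.h>

  /* Hypothetical caller state, for illustration only. */
  struct sg_ring {
          struct scatterlist sg[MAX_SKB_FRAGS];
          int start;              /* oldest in-use entry */
          int end;                /* next free entry; advanced by sk_alloc_sg() */
          unsigned int size;      /* bytes currently described by the ring */
  };

  static int sg_ring_alloc(struct sock *sk, struct sg_ring *ring, int len,
                           int first_coalesce)
  {
          /* len is the total number of bytes the ring should describe;
           * sk_alloc_sg() subtracts the already-allocated ring->size first.
           * New entries are filled starting at ring->end, the index wraps
           * back to 0 after MAX_SKB_FRAGS, and -ENOSPC is returned once the
           * write index would catch up with ring->start.
           */
          return sk_alloc_sg(sk, len, ring->sg, ring->start,
                             &ring->end, &ring->size, first_coalesce);
  }

The existing TLS callers correspond to the degenerate case of this sketch
with start fixed at 0, which is why they simply pass 0 for sg_start below.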

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 include/net/sock.h |    2 +-
 net/core/sock.c    |   27 ++++++++++++++++-----------
 net/tls/tls_sw.c   |    4 ++--
 3 files changed, 19 insertions(+), 14 deletions(-)

--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2150,7 +2150,7 @@ static inline struct page_frag *sk_page_
 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
 int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-		int *sg_num_elem, unsigned int *sg_size,
+		int sg_start, int *sg_curr, unsigned int *sg_size,
 		int first_coalesce);
 
 /*
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2138,19 +2138,20 @@ bool sk_page_frag_refill(struct sock *sk
 EXPORT_SYMBOL(sk_page_frag_refill);
 
 int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-		int *sg_num_elem, unsigned int *sg_size,
+		int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
 		int first_coalesce)
 {
+	int sg_curr = *sg_curr_index, use = 0, rc = 0;
+	unsigned int size = *sg_curr_size;
 	struct page_frag *pfrag;
-	unsigned int size = *sg_size;
-	int num_elem = *sg_num_elem, use = 0, rc = 0;
 	struct scatterlist *sge;
-	unsigned int orig_offset;
 
 	len -= size;
 	pfrag = sk_page_frag(sk);
 
 	while (len > 0) {
+		unsigned int orig_offset;
+
 		if (!sk_page_frag_refill(sk, pfrag)) {
 			rc = -ENOMEM;
 			goto out;
@@ -2168,17 +2169,21 @@ int sk_alloc_sg(struct sock *sk, int len
 		orig_offset = pfrag->offset;
 		pfrag->offset += use;
 
-		sge = sg + num_elem - 1;
-		if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
+		sge = sg + sg_curr - 1;
+		if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
 		    sg->offset + sg->length == orig_offset) {
 			sg->length += use;
 		} else {
-			sge++;
+			sge = sg + sg_curr;
 			sg_unmark_end(sge);
 			sg_set_page(sge, pfrag->page, use, orig_offset);
 			get_page(pfrag->page);
-			++num_elem;
-			if (num_elem == MAX_SKB_FRAGS) {
+			sg_curr++;
+
+			if (sg_curr == MAX_SKB_FRAGS)
+				sg_curr = 0;
+
+			if (sg_curr == sg_start) {
 				rc = -ENOSPC;
 				break;
 			}
@@ -2187,8 +2192,8 @@ int sk_alloc_sg(struct sock *sk, int len
 		len -= use;
 	}
 out:
-	*sg_size = size;
-	*sg_num_elem = num_elem;
+	*sg_curr_size = size;
+	*sg_curr_index = sg_curr;
 	return rc;
 }
 EXPORT_SYMBOL(sk_alloc_sg);
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -94,7 +94,7 @@ static int alloc_encrypted_sg(struct soc
 	int rc = 0;
 
 	rc = sk_alloc_sg(sk, len,
-			 ctx->sg_encrypted_data,
+			 ctx->sg_encrypted_data, 0,
 			 &ctx->sg_encrypted_num_elem,
 			 &ctx->sg_encrypted_size, 0);
 
@@ -107,7 +107,7 @@ static int alloc_plaintext_sg(struct soc
 	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
 	int rc = 0;
 
-	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data,
+	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
 			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
 			 tls_ctx->pending_open_record_frags);