From: Edward Cree <ecree@solarflare.com>
Date: Wed, 4 Jul 2018 19:23:50 +0100
Subject: net: ipv4: fix drop handling in ip_list_rcv() and
 ip_list_rcv_finish()
Patch-mainline: v4.19-rc1
Git-commit: a4ca8b7df73c6d78b8b5aa8246a7d794b25c25ce
References: bsc#1109837

Since callees (ip_rcv_core() and ip_rcv_finish_core()) might free or steal
the skb, we can't use the list_cut_before() method; we can't even do a
list_del(&skb->list) in the drop case, because the skb might already have
been freed and reused.
So instead, take each skb off the source list before processing, and add it
to the sublist afterwards if it wasn't freed or stolen. (A standalone
sketch of the resulting loop shape follows the diffstat below.)

Fixes: 5fa12739a53d ("net: ipv4: listify ip_rcv_finish")
Fixes: 17266ee93984 ("net: ipv4: listified version of ip_rcv")
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 net/ipv4/ip_input.c |   16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
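
A minimal standalone sketch of the unlink-first pattern the patch adopts,
for readers without the surrounding kernel context. The list helpers below
are simplified stand-ins for the kernel's <linux/list.h>, and process_one()
is a hypothetical callee modelling ip_rcv_finish_core()'s ability to free
the object passed to it; only the loop shape is taken from the patch.

/* Build a list, then walk it the way the fixed ip_list_rcv_finish() does:
 * unlink each node before the callee runs, and re-queue it on the sublist
 * only if the callee did not consume it.  Compiles as plain userspace C.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define INIT_LIST_HEAD(h) do { (h)->prev = (h); (h)->next = (h); } while (0)

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

struct pkt {
	int id;
	struct list_head list;
};

/* Hypothetical callee that may consume its argument: odd ids are
 * "dropped", i.e. freed, so the caller must not touch them again. */
static int process_one(struct pkt *p)
{
	if (p->id & 1) {
		free(p);
		return 1;	/* stands in for NET_RX_DROP */
	}
	return 0;
}

int main(void)
{
	struct list_head head, sublist;
	struct pkt *p, *next;
	int i;

	INIT_LIST_HEAD(&head);
	for (i = 0; i < 6; i++) {
		p = malloc(sizeof(*p));
		p->id = i;
		list_add_tail(&p->list, &head);
	}

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(p, next, &head, list) {
		/* Unlink before the callee runs: if it frees p,
		 * p->list must never be touched again. */
		list_del(&p->list);
		if (process_one(p))
			continue;	/* p is gone; nothing to unlink */
		/* We still own p: queue it on the sublist. */
		list_add_tail(&p->list, &sublist);
	}

	list_for_each_entry_safe(p, next, &sublist, list) {
		printf("kept pkt %d\n", p->id);	/* prints 0, 2, 4 */
		list_del(&p->list);
		free(p);
	}
	return 0;
}

Compared with the old list_cut_before() approach, the per-skb
list_del()/list_add_tail() pair costs a few extra pointer writes per
packet, but it is the only shape that never dereferences skb->list after
the callee has had a chance to free the skb.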

--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -541,24 +541,27 @@ static void ip_list_rcv_finish(struct ne
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
+		list_del(&skb->list);
 		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
 		if (curr_dst != dst) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
 				ip_sublist_rcv_finish(&sublist);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dst = dst;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv_finish(head);
+	ip_sublist_rcv_finish(&sublist);
 }
 
 static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
@@ -578,24 +581,27 @@ void ip_list_rcv(struct list_head *head,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
+		list_del(&skb->list);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
 
 		if (curr_dev != dev || curr_net != net) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
-				ip_sublist_rcv(&sublist, dev, net);
+				ip_sublist_rcv(&sublist, curr_dev, curr_net);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dev = dev;
 			curr_net = net;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv(head, curr_dev, curr_net);
+	ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }