From: Edward Cree <ecree@solarflare.com>
Date: Mon, 9 Jul 2018 18:09:54 +0100
Subject: net: core: fix uses-after-free in list processing
Patch-mainline: v4.19-rc1
Git-commit: 8c057efaebb557b60ba514b5e39e8000a1eab0f1
References: bsc#1109837

In netif_receive_skb_list_internal(), all of skb_defer_rx_timestamp(),
 do_xdp_generic() and enqueue_to_backlog() can lead to kfree(skb).  Thus,
 we cannot wait until after they return to remove the skb from the list;
 instead, we remove it first and, in the pass case, add it to a sublist
 afterwards.
In the case of enqueue_to_backlog() we have already decided not to pass
 when we call the function, so we do not need a sublist.

Fixes: 7da517a3bc52 ("net: core: Another step of skb receive list processing")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 net/core/dev.c |   21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
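
Not part of the applied diff: a minimal, standalone C sketch of the ownership pattern the commit message describes, where a consumer may free the element it is handed, so the element must be unlinked from the list before the call and re-linked to a sublist only if the consumer passes it back. All names here (struct item, handler(), process()) are invented for the illustration and do not exist in the kernel source.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        int id;
        struct item *next;
    };

    /* May free the item it is given; returns true only if the caller keeps it. */
    static bool handler(struct item *it)
    {
        if (it->id % 2) {
            free(it);             /* consumed: the caller must not touch it again */
            return false;
        }
        return true;              /* passed back to the caller */
    }

    static void process(struct item **head)
    {
        struct item *it = *head, *next;
        struct item *kept = NULL, **kept_tail = &kept;

        *head = NULL;
        while (it) {
            next = it->next;      /* save the link before handler() can free it */
            it->next = NULL;      /* unlink first, as list_del() does in the patch */
            if (handler(it)) {    /* only dereference it again if it was kept */
                *kept_tail = it;
                kept_tail = &it->next;
            }
            it = next;
        }
        *head = kept;             /* splice survivors back, like list_splice_init() */
    }

    int main(void)
    {
        struct item *head = NULL, **tail = &head;

        for (int i = 0; i < 6; i++) {
            struct item *it = malloc(sizeof(*it));
            it->id = i;
            it->next = NULL;
            *tail = it;
            tail = &it->next;
        }
        process(&head);
        while (head) {
            struct item *it = head;
            head = head->next;
            printf("kept %d\n", it->id);
            free(it);
        }
        return 0;
    }

The kernel change below follows the same rule: once a callee such as skb_defer_rx_timestamp(), do_xdp_generic() or enqueue_to_backlog() may kfree(skb), the skb's list linkage must not be touched after the call returns, which is why list_del() now precedes each call. In the enqueue_to_backlog() case no sublist is needed because the skb is never passed back.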

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4957,25 +4957,30 @@ static void netif_receive_skb_list_inter
 {
 	struct bpf_prog *xdp_prog = NULL;
 	struct sk_buff *skb, *next;
+	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		if (skb_defer_rx_timestamp(skb))
-			/* Handled, remove from list */
-			list_del(&skb->list);
+		list_del(&skb->list);
+		if (!skb_defer_rx_timestamp(skb))
+			list_add_tail(&skb->list, &sublist);
 	}
+	list_splice_init(&sublist, head);
 
 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 		preempt_disable();
 		rcu_read_lock();
 		list_for_each_entry_safe(skb, next, head, list) {
 			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
-				/* Dropped, remove from list */
-				list_del(&skb->list);
+			list_del(&skb->list);
+			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+				list_add_tail(&skb->list, &sublist);
 		}
 		rcu_read_unlock();
 		preempt_enable();
+		/* Put passed packets back on main list */
+		list_splice_init(&sublist, head);
 	}
 
 	rcu_read_lock();
@@ -4986,9 +4991,9 @@ static void netif_receive_skb_list_inter
 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 			if (cpu >= 0) {
-				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-				/* Handled, remove from list */
+				/* Will be handled, remove from list */
 				list_del(&skb->list);
+				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			}
 		}
 	}