From: Jakub Kicinski <jakub.kicinski@netronome.com>
Date: Mon, 30 Jul 2018 20:43:52 -0700
Subject: xsk: refactor xdp_umem_assign_dev()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Patch-mainline: v4.19-rc1
Git-commit: f734607e819b951bae3b436b026ec672082e9241
References: bsc#1109837

Return early and only take the ref on dev once there is no possibility
of failing.
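
For reference, this is roughly the shape of xdp_umem_assign_dev() after the
change (a simplified sketch with the two ndo_bpf() calls abbreviated; see the
diff below for the exact code). The point is that every early return happens
before the reference is taken, so none of those paths needs a dev_put():

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -ENOTSUPP : 0;	/* fail or fallback */

	/* XDP_QUERY_XSK_UMEM via ndo_bpf() under rtnl_lock();
	 * on error: return force_zc ? -ENOTSUPP : 0;
	 */

	/* XDP_SETUP_XSK_UMEM via ndo_bpf() under rtnl_lock();
	 * on error: return force_zc ? err : 0;
	 */

	dev_hold(dev);		/* ref taken only once success is guaranteed */
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;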

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 net/xdp/xdp_umem.c |   57 +++++++++++++++++++++++------------------------------
 1 file changed, 25 insertions(+), 32 deletions(-)

--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -56,41 +56,34 @@ int xdp_umem_assign_dev(struct xdp_umem
 	if (force_copy)
 		return 0;
 
-	dev_hold(dev);
+	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
+		return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
 
-	if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) {
-		bpf.command = XDP_QUERY_XSK_UMEM;
+	bpf.command = XDP_QUERY_XSK_UMEM;
 
-		rtnl_lock();
-		err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-		rtnl_unlock();
-
-		if (err) {
-			dev_put(dev);
-			return force_zc ? -ENOTSUPP : 0;
-		}
-
-		bpf.command = XDP_SETUP_XSK_UMEM;
-		bpf.xsk.umem = umem;
-		bpf.xsk.queue_id = queue_id;
-
-		rtnl_lock();
-		err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-		rtnl_unlock();
-
-		if (err) {
-			dev_put(dev);
-			return force_zc ? err : 0; /* fail or fallback */
-		}
-
-		umem->dev = dev;
-		umem->queue_id = queue_id;
-		umem->zc = true;
-		return 0;
-	}
+	rtnl_lock();
+	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
+	rtnl_unlock();
+
+	if (err)
+		return force_zc ? -ENOTSUPP : 0;
+
+	bpf.command = XDP_SETUP_XSK_UMEM;
+	bpf.xsk.umem = umem;
+	bpf.xsk.queue_id = queue_id;
 
-	dev_put(dev);
-	return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+	rtnl_lock();
+	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
+	rtnl_unlock();
+
+	if (err)
+		return force_zc ? err : 0; /* fail or fallback */
+
+	dev_hold(dev);
+	umem->dev = dev;
+	umem->queue_id = queue_id;
+	umem->zc = true;
+	return 0;
 }
 
 static void xdp_umem_clear_dev(struct xdp_umem *umem)