From: Shannon Nelson <snelson@pensando.io>
Date: Tue, 27 Jul 2021 10:43:29 -0700
Subject: ionic: use fewer inits on the buf_info struct
Patch-mainline: v5.15-rc1
Git-commit: e75ccac1d0644c9d7ad531cb9a81c499930c06da
References: jsc#SLE-19282

Based on Alex's review notes on [1], we don't need to write to the
buf_info elements as often and can tighten up how they are used.
Also, use prefetchw() to warm up the struct page for a later
get_page().

[1] https://lore.kernel.org/netdev/CAKgT0UfyjoAN7LTnq0NMZfXRv4v7iTCPyAb9pVr3qWMhop_BVw@mail.gmail.com/
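Why prefetchw() rather than prefetch(): get_page() bumps the page
refcount with an atomic write, so fetching the struct page's cache
line with write intent up front avoids a second cache transition when
the increment lands.  A minimal, hypothetical sketch of the pattern
(rx_attach_frag is an invented name, not this driver's code):

    #include <linux/prefetch.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>

    static void rx_attach_frag(struct sk_buff *skb, struct page *page,
                               unsigned int off, unsigned int len)
    {
            /* warm the struct page for write; get_page() will modify it */
            prefetchw(page);

            /* ... other descriptor work can overlap the prefetch ... */

            /* atomic write to the page refcount hits a warm line */
            get_page(page);
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                            off, len, PAGE_SIZE);
    }

The same ordering shows up below in ionic_rx_frags(), which touches
buf_info->page well before the eventual get_page().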

Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/pensando/ionic/ionic_txrx.c |   27 +++++++++--------------
 1 file changed, 11 insertions(+), 16 deletions(-)

--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_
 	return netdev_get_tx_queue(q->lif->netdev, q->index);
 }
 
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
-	buf_info->page = NULL;
-	buf_info->page_offset = 0;
-	buf_info->dma_addr = 0;
-}
-
 static int ionic_rx_page_alloc(struct ionic_queue *q,
 			       struct ionic_buf_info *buf_info)
 {
 	struct net_device *netdev = q->lif->netdev;
 	struct ionic_rx_stats *stats;
 	struct device *dev;
+	struct page *page;
 
 	dev = q->dev;
 	stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct io
 		return -EINVAL;
 	}
 
-	buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
-	if (unlikely(!buf_info->page)) {
+	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+	if (unlikely(!page)) {
 		net_err_ratelimited("%s: %s page alloc failed\n",
 				    netdev->name, q->name);
 		stats->alloc_err++;
 		return -ENOMEM;
 	}
-	buf_info->page_offset = 0;
 
-	buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+	buf_info->dma_addr = dma_map_page(dev, page, 0,
 					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
-		__free_pages(buf_info->page, 0);
-		ionic_rx_buf_reset(buf_info);
+		__free_pages(page, 0);
 		net_err_ratelimited("%s: %s dma map failed\n",
 				    netdev->name, q->name);
 		stats->dma_map_err++;
 		return -EIO;
 	}
 
+	buf_info->page = page;
+	buf_info->page_offset = 0;
+
 	return 0;
 }
 
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct io
 
 	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 	__free_pages(buf_info->page, 0);
-	ionic_rx_buf_reset(buf_info);
+	buf_info->page = NULL;
 }
 
 static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(st
 	buf_info = &desc_info->bufs[0];
 	len = le16_to_cpu(comp->len);
 
-	prefetch(buf_info->page);
+	prefetchw(buf_info->page);
 
 	skb = napi_get_frags(&q_to_qcq(q)->napi);
 	if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(st
 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 			dma_unmap_page(dev, buf_info->dma_addr,
 				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-			ionic_rx_buf_reset(buf_info);
+			buf_info->page = NULL;
 		}
 
 		buf_info++;