From 5d78abb6fbc974d601dd365b9ce39f320fb5ba79 Mon Sep 17 00:00:00 2001
From: Dave Jiang <dave.jiang@intel.com>
Date: Tue, 26 Oct 2021 14:36:02 -0700
Subject: [PATCH] dmaengine: idxd: rework descriptor free path on failure
Git-commit: 5d78abb6fbc974d601dd365b9ce39f320fb5ba79
Patch-mainline: v5.17-rc1
References: jsc#PED-846

Refactor the completion function to allow the caller to skip descriptor
freeing. This removes descriptor freeing from the submit failure path
entirely and leaves the responsibility to the caller.

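For illustration, the resulting convention on the submit path looks
roughly like this (a condensed sketch of idxd_dma_tx_submit() after
this change, not verbatim driver code):

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		/* submit no longer frees; the caller owns cleanup */
		idxd_free_desc(wq, desc);
		return rc;
	}
	return cookie;

Completion paths that keep the old free-on-complete behavior simply
pass free_desc = true, e.g.:

	/* complete and hand the descriptor back to the allocator */
	idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
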
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163528416222.3925689.12859769271667814762.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Acked-by: Takashi Iwai <tiwai@suse.de>

---
 drivers/dma/idxd/dma.c    |   10 ++++++++--
 drivers/dma/idxd/idxd.h   |    8 +-------
 drivers/dma/idxd/init.c   |    9 +++------
 drivers/dma/idxd/irq.c    |    8 ++++----
 drivers/dma/idxd/submit.c |   14 ++++----------
 5 files changed, 20 insertions(+), 29 deletions(-)

--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -21,7 +21,8 @@ static inline struct idxd_wq *to_idxd_wq
 }
 
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type)
+			   enum idxd_complete_type comp_type,
+			   bool free_desc)
 {
 	struct dma_async_tx_descriptor *tx;
 	struct dmaengine_result res;
@@ -44,6 +45,9 @@ void idxd_dma_complete_txd(struct idxd_d
 		tx->callback = NULL;
 		tx->callback_result = NULL;
 	}
+
+	if (free_desc)
+		idxd_free_desc(desc->wq, desc);
 }
 
 static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -153,8 +157,10 @@ static dma_cookie_t idxd_dma_tx_submit(s
 	cookie = dma_cookie_assign(tx);
 
 	rc = idxd_submit_desc(wq, desc);
-	if (rc < 0)
+	if (rc < 0) {
+		idxd_free_desc(wq, desc);
 		return rc;
+	}
 
 	return cookie;
 }
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -581,7 +581,7 @@ int idxd_register_dma_channel(struct idx
 void idxd_unregister_dma_channel(struct idxd_wq *wq);
 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type);
+			   enum idxd_complete_type comp_type, bool free_desc);
 
 /* cdev */
 int idxd_cdev_register(void);
@@ -605,10 +605,4 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-	idxd_dma_complete_txd(desc, reason);
-	idxd_free_desc(desc->wq, desc);
-}
-
 #endif
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -717,10 +717,8 @@ static void idxd_flush_pending_llist(str
 	if (!head)
 		return;
 
-	llist_for_each_entry_safe(desc, itr, head, llnode) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
-	}
+	llist_for_each_entry_safe(desc, itr, head, llnode)
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 }
 
 static void idxd_flush_work_list(struct idxd_irq_entry *ie)
@@ -729,8 +727,7 @@ static void idxd_flush_work_list(struct
 
 	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
 		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 	}
 }
 
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -195,11 +195,11 @@ static void irq_process_pending_llist(st
 			 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 			 */
 			if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-				complete_desc(desc, IDXD_COMPLETE_ABORT);
+				idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 				continue;
 			}
 
-			complete_desc(desc, IDXD_COMPLETE_NORMAL);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 		} else {
 			spin_lock(&irq_entry->list_lock);
 			list_add_tail(&desc->list,
@@ -239,11 +239,11 @@ static void irq_process_work_list(struct
 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 		 */
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-			complete_desc(desc, IDXD_COMPLETE_ABORT);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
 		}
 
-		complete_desc(desc, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 	}
 }
 
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd
 	spin_unlock(&ie->list_lock);
 
 	if (found)
-		complete_desc(found, IDXD_COMPLETE_ABORT);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
 
 	/*
 	 * complete_desc() will return desc to allocator and the desc can be
@@ -144,7 +144,7 @@ static void llist_abort_desc(struct idxd
 	 */
 	list_for_each_entry_safe(d, t, &flist, list) {
 		list_del_init(&d->list);
-		complete_desc(d, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(d, IDXD_COMPLETE_NORMAL, true);
 	}
 }
 
@@ -155,15 +155,11 @@ int idxd_submit_desc(struct idxd_wq *wq,
 	void __iomem *portal;
 	int rc;
 
-	if (idxd->state != IDXD_DEV_ENABLED) {
-		idxd_free_desc(wq, desc);
+	if (idxd->state != IDXD_DEV_ENABLED)
 		return -EIO;
-	}
 
-	if (!percpu_ref_tryget_live(&wq->wq_active)) {
-		idxd_free_desc(wq, desc);
+	if (!percpu_ref_tryget_live(&wq->wq_active))
 		return -ENXIO;
-	}
 
 	portal = idxd_wq_portal_addr(wq);
 
@@ -198,8 +194,6 @@ int idxd_submit_desc(struct idxd_wq *wq,
 			/* abort operation frees the descriptor */
 			if (ie)
 				llist_abort_desc(wq, ie, desc);
-			else
-				idxd_free_desc(wq, desc);
 			return rc;
 		}
 	}