From: Xiang Chen <chenxiang66@hisilicon.com>
Date: Mon, 20 Dec 2021 19:21:37 +0800
Subject: scsi: libsas: Keep host active while processing events
Git-commit: 307d9f49cce966c2ba969f58bd6227bc0092afaa
Patch-mainline: v5.17-rc1
References: bsc#1198802

Processing events such as PORTE_BROADCAST_RCVD may cause dependency issues
for runtime power management support.  One such problem is that handling a
PORTE_BROADCAST_RCVD event requires the host to be resumed in order to
send SMP commands. However, when resuming the host, the phyup events
generated by re-enabling the phys are processed in the same workqueue as
the original PORTE_BROADCAST_RCVD event. As a result, the host never
finishes resuming (it waits for the phyup event processing), the
PORTE_BROADCAST_RCVD event cannot be processed because the SMP commands
are blocked, and so we have a deadlock.  Solve this problem by having
libsas keep the host active until it has completely finished processing
phy or port events such as PORTE_BYTES_DMAED. That way we do not have to
worry about resuming the host to process individual SMP commands in this
example.
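
In other words, the fix is a plain runtime PM usage-count pairing: the
notify path takes a reference with pm_runtime_get_noresume() (it may run
in atomic context, so it must not resume the device), and the matching
pm_runtime_put() happens only once the worker has fully processed the
event, or on a failure path where the worker will never run. A minimal
sketch of that pattern (my_event, queue_event(), event_worker() and
process_event() are illustrative names, not the libsas functions):

  #include <linux/device.h>
  #include <linux/pm_runtime.h>
  #include <linux/workqueue.h>

  struct my_event {			/* illustrative only */
  	struct work_struct work;
  	struct device *dev;
  };

  static void process_event(struct my_event *ev)
  {
  	/* ... handle the event, e.g. issue SMP commands ... */
  }

  /*
   * Consumer side: the paired pm_runtime_put() is issued only after the
   * event has been completely processed.
   */
  static void event_worker(struct work_struct *work)
  {
  	struct my_event *ev = container_of(work, struct my_event, work);

  	process_event(ev);
  	pm_runtime_put(ev->dev);
  }

  /*
   * Producer side: pin the usage count without resuming, so the device
   * cannot runtime suspend while the event is pending.  Returns 1 if the
   * work was queued.
   */
  static int queue_event(struct my_event *ev)
  {
  	pm_runtime_get_noresume(ev->dev);
  	if (!queue_work(system_wq, &ev->work)) {
  		/* Worker will never run: drop the reference here instead. */
  		pm_runtime_put(ev->dev);
  		return 0;
  	}
  	return 1;
  }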

Link: https://lore.kernel.org/r/1639999298-244569-15-git-send-email-chenxiang66@hisilicon.com
Reviewed-by: John Garry <john.garry@huawei.com>
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Lee Duncan <lduncan@suse.com>
---
 drivers/scsi/libsas/sas_event.c | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 626ef96b9348..3613b9b315bc 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -50,8 +50,10 @@ void sas_queue_deferred_work(struct sas_ha_struct *ha)
 	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
 		list_del_init(&sw->drain_node);
 		ret = sas_queue_work(ha, sw);
-		if (ret != 1)
+		if (ret != 1) {
+			pm_runtime_put(ha->dev);
 			sas_free_event(to_asd_sas_event(&sw->work));
+		}
 	}
 	spin_unlock_irq(&ha->lock);
 }
@@ -126,16 +128,22 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
 static void sas_port_event_worker(struct work_struct *work)
 {
 	struct asd_sas_event *ev = to_asd_sas_event(work);
+	struct asd_sas_phy *phy = ev->phy;
+	struct sas_ha_struct *ha = phy->ha;
 
 	sas_port_event_fns[ev->event](work);
+	pm_runtime_put(ha->dev);
 	sas_free_event(ev);
 }
 
 static void sas_phy_event_worker(struct work_struct *work)
 {
 	struct asd_sas_event *ev = to_asd_sas_event(work);
+	struct asd_sas_phy *phy = ev->phy;
+	struct sas_ha_struct *ha = phy->ha;
 
 	sas_phy_event_fns[ev->event](work);
+	pm_runtime_put(ha->dev);
 	sas_free_event(ev);
 }
 
@@ -170,14 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
 	if (!ev)
 		return -ENOMEM;
 
+	/* Call pm_runtime_put() with pairs in sas_port_event_worker() */
+	pm_runtime_get_noresume(ha->dev);
+
 	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
 
 	if (sas_defer_event(phy, ev))
 		return 0;
 
 	ret = sas_queue_event(event, &ev->work, ha);
-	if (ret != 1)
+	if (ret != 1) {
+		pm_runtime_put(ha->dev);
 		sas_free_event(ev);
+	}
 
 	return ret;
 }
@@ -196,14 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
 	if (!ev)
 		return -ENOMEM;
 
+	/* Call pm_runtime_put() with pairs in sas_phy_event_worker() */
+	pm_runtime_get_noresume(ha->dev);
+
 	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
 
 	if (sas_defer_event(phy, ev))
 		return 0;
 
 	ret = sas_queue_event(event, &ev->work, ha);
-	if (ret != 1)
+	if (ret != 1) {
+		pm_runtime_put(ha->dev);
 		sas_free_event(ev);
+	}
 
 	return ret;
 }