From: Yong Zhao <yong.zhao@amd.com>
Date: Wed, 27 Sep 2017 00:09:51 -0400
Subject: drm/amdkfd: Avoid submitting an unnecessary packet to HWS
Git-commit: c4744e243c346b9e96857c6d11c716d31d475165
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Previously, destroy_kernel_queue_cpsch() had to preempt all queues with
its own unmap_queues_cpsch() call before calling execute_queues_cpsch(),
which then submitted a second, unnecessary unmap packet to the HWS. Let
callers pass the unmap filter to execute_queues_cpsch() so a single
unmap/map cycle is enough.

v2:
Make the queue mapping interfaces more consistent by passing the unmap
filter parameters directly to execute_queues_cpsch(), the same as for
unmap_queues_cpsch().

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
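Purely illustrative, not part of the commit: a stripped-down, compilable
sketch of the new calling convention, with the kfd structures and the HWS
packet submission reduced to stubs. It only shows that callers now hand
the unmap filter (and filter_param) straight to execute_queues_cpsch(),
instead of execute_queues_cpsch() hard-coding a dynamic-queues unmap
internally.

/* Illustrative sketch only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
};

struct device_queue_manager { int queue_count; };

/* Stand-in for unmap_queues_cpsch(): pretend preemption always succeeds. */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      enum kfd_unmap_queues_filter filter,
			      uint32_t filter_param)
{
	printf("unmap %d queue(s): filter=%d param=%u\n",
	       dqm->queue_count, filter, (unsigned int)filter_param);
	return 0;
}

/*
 * With the patch, execute_queues_cpsch() receives the filter from the
 * caller, so destroy_kernel_queue_cpsch() no longer needs a separate
 * unmap_queues_cpsch() call of its own.
 */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = unmap_queues_cpsch(dqm, filter, filter_param);

	if (retval)
		return retval;
	/* ...remap the runlist (map_queues_cpsch() in the real driver)... */
	return 0;
}

int main(void)
{
	struct device_queue_manager dqm = { .queue_count = 1 };

	/* Former "unmap ALL, then execute" call sites collapse to this: */
	return execute_queues_cpsch(&dqm,
			KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
}

As in the real driver, callers are expected to hold dqm->lock around
execute_queues_cpsch(); that requirement is unchanged by this patch.
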
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c |   26 +++++++++---------
 1 file changed, 14 insertions(+), 12 deletions(-)

--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -44,7 +44,9 @@ static int create_compute_queue_nocpsch(
 					struct queue *q,
 					struct qcm_process_device *qpd);
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm);
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				enum kfd_unmap_queues_filter filter,
+				uint32_t filter_param);
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 				enum kfd_unmap_queues_filter filter,
 				uint32_t filter_param);
@@ -729,7 +731,7 @@ static int start_cpsch(struct device_que
 	init_interrupts(dqm);
 
 	mutex_lock(&dqm->lock);
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -775,7 +777,7 @@ static int create_kernel_queue_cpsch(str
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -786,12 +788,10 @@ static void destroy_kernel_queue_cpsch(s
 					struct qcm_process_device *qpd)
 {
 	mutex_lock(&dqm->lock);
-	/* here we actually preempt the DIQ */
-	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	list_del(&kq->list);
 	dqm->queue_count--;
 	qpd->is_debug = false;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
 	 * Unconditionally decrement this counter, regardless of the queue's
 	 * type.
@@ -850,7 +850,8 @@ static int create_queue_cpsch(struct dev
 	list_add(&q->list, &qpd->queues_list);
 	if (q->properties.is_active) {
 		dqm->queue_count++;
-		retval = execute_queues_cpsch(dqm);
+		retval = execute_queues_cpsch(dqm,
+				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -960,14 +961,15 @@ static int unmap_queues_cpsch(struct dev
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
-static int execute_queues_cpsch(struct device_queue_manager *dqm)
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				enum kfd_unmap_queues_filter filter,
+				uint32_t filter_param)
 {
 	int retval;
 
-	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
-			0);
+	retval = unmap_queues_cpsch(dqm, filter, filter_param);
 	if (retval) {
-		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
 		return retval;
 	}
 
@@ -1013,7 +1015,7 @@ static int destroy_queue_cpsch(struct de
 	if (q->properties.is_active)
 		dqm->queue_count--;
 
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);