From 250af743c044c342916a36517187e6e42a281efe Mon Sep 17 00:00:00 2001
From: Gang Ba <gaba@amd.com>
Date: Fri, 23 Aug 2019 16:01:11 -0400
Subject: Revert "drm/amdgpu: free up the first paging queue v2"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 250af743c044c342916a36517187e6e42a281efe
Patch-mainline: v5.4-rc1
References: bsc#1152489

This reverts commit 4f8bc72fbf10f2dc8bca74d5da08b3a981b2e5cd.

It turned out that a single reserved queue wouldn't be
sufficient for page fault handling.

Signed-off-by: Gang Ba <gaba@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9b7e660828c4..1f7c6c2d6e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2503,8 +2503,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
-		adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
+	if (adev->sdma.has_page_queue)
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
 	else
 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
@@ -2523,22 +2523,15 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
-		for (i = 1; i < adev->sdma.num_instances; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
 			sched = &adev->sdma.instance[i].page.sched;
-			adev->vm_manager.vm_pte_rqs[i - 1] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
-		adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
-	} else {
-		for (i = 0; i < adev->sdma.num_instances; i++) {
+		else
 			sched = &adev->sdma.instance[i].ring.sched;
-			adev->vm_manager.vm_pte_rqs[i] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+		adev->vm_manager.vm_pte_rqs[i] =
+			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
+	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
-- 
2.28.0