From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 21 Dec 2017 15:47:28 +0100
Subject: drm/amdgpu: simplify huge page handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 3cc1d3ea4a6a81a7108fe27fa42efd86ea64061d
Patch-mainline: v4.16-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Update the PDEs after resetting the huge flag.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |   60 +++++++++------------------------
 1 file changed, 18 insertions(+), 42 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,54 +946,38 @@ static void amdgpu_vm_handle_huge_pages(
 					unsigned nptes, uint64_t dst,
 					uint64_t flags)
 {
-	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
 	uint64_t pd_addr, pde;
 
 	/* In the case of a mixed PT the PDE must point to it*/
-	if (p->adev->asic_type < CHIP_VEGA10 || p->src ||
-	    nptes != AMDGPU_VM_PTE_COUNT(p->adev)) {
-		dst = amdgpu_bo_gpu_offset(entry->base.bo);
-		flags = AMDGPU_PTE_VALID;
-	} else {
+	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
 		/* Set the huge page flag to stop scanning at this PDE */
 		flags |= AMDGPU_PDE_PTE;
 	}
 
-	if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+	if (!(flags & AMDGPU_PDE_PTE)) {
+		if (entry->huge) {
+			/* Add the entry to the relocated list to update it. */
+			entry->huge = false;
+			spin_lock(&p->vm->status_lock);
+			list_move(&entry->base.vm_status, &p->vm->relocated);
+			spin_unlock(&p->vm->status_lock);
+		}
 		return;
-	entry->huge = !!(flags & AMDGPU_PDE_PTE);
+	}
 
+	entry->huge = true;
 	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 			       &dst, &flags);
 
-	if (use_cpu_update) {
-		/* In case a huge page is replaced with a system
-		 * memory mapping, p->pages_addr != NULL and
-		 * amdgpu_vm_cpu_set_ptes would try to translate dst
-		 * through amdgpu_vm_map_gart. But dst is already a
-		 * GPU address (of the page table). Disable
-		 * amdgpu_vm_map_gart temporarily.
-		 */
-		dma_addr_t *tmp;
-
-		tmp = p->pages_addr;
-		p->pages_addr = NULL;
-
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
+	if (parent->base.bo->shadow) {
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
 		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-		p->pages_addr = tmp;
-	} else {
-		if (parent->base.bo->shadow) {
-			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-			pde = pd_addr + (entry - parent->entries) * 8;
-			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
-		}
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-		pde = pd_addr + (entry - parent->entries) * 8;
-		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+		p->func(p, pde, dst, 1, 0, flags);
 	}
+	pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+	pde = pd_addr + (entry - parent->entries) * 8;
+	p->func(p, pde, dst, 1, 0, flags);
 }
 
 /**
@@ -1205,12 +1189,6 @@ static int amdgpu_vm_bo_update_mapping(s
 	/* padding, etc. */
 	ndw = 64;
 
-	/* one PDE write for each huge page */
-	if (vm->root.base.bo->shadow)
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-	else
-		ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
-
 	if (pages_addr) {
 		/* copy commands needed */
 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1285,8 +1263,6 @@ static int amdgpu_vm_bo_update_mapping(s
 
 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-				   adev->vm_manager.root_level);
 	return r;
 }