From: Yifan Zhang <yifan1.zhang@amd.com>
Date: Tue, 17 Aug 2021 16:47:26 +0800
Subject: drm/amdkfd: export svm_range_list_lock_and_flush_work
Git-commit: 6bdfc37b5cccc12e54e7019907d7eb4ff9e741fb
Patch-mainline: v5.16-rc1
References: jsc#PED-1294, bsc#1204363, CVE-2022-3523

Export svm_range_list_lock_and_flush_work so that other KFD components can
synchronize against the svm_range_list.

Signed-off-by: Yifan Zhang <yifan1.zhang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c |    2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |    1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1543,7 +1543,7 @@ unreserve_out:
  * Context: Returns with mmap write lock held, pending deferred work flushed
  *
  */
-static void
+void
 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
 				   struct mm_struct *mm)
 {
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -188,6 +188,7 @@ void svm_range_prefault(struct svm_range
 			void *owner);
 struct kfd_process_device *
 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
 
 /* SVM API and HMM page migration work together, device memory type
  * is initialized to not 0 when page migration register device memory.
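
A minimal sketch (not part of this patch) of how another KFD component might
use the now-exported helper, based on the comment above it ("Returns with mmap
write lock held, pending deferred work flushed"). The caller name
kfd_example_update_ranges() and its error handling are assumptions for
illustration only:

	#include "kfd_priv.h"
	#include "kfd_svm.h"

	/* Hypothetical caller: serialize an update against the SVM range list. */
	static int kfd_example_update_ranges(struct kfd_process *p)
	{
		struct mm_struct *mm = get_task_mm(current);

		if (!mm)
			return -ESRCH;

		/* Returns with mmap write lock held and deferred SVM work flushed. */
		svm_range_list_lock_and_flush_work(&p->svms, mm);

		/* ... safely walk or modify p->svms here ... */

		mmap_write_unlock(mm);
		mmput(mm);
		return 0;
	}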