From 2d299f5bfe212b68bde981153a00926e817ee82a Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 23 Nov 2022 15:54:17 -0800
Subject: [PATCH] drm/i915/huc: always init the delayed load fence
Git-commit: 2d299f5bfe212b68bde981153a00926e817ee82a
Patch-mainline: v6.2-rc1
Alt-commit: 14347a9c889fbdbae81e500f6c6e313f5d8e5271
References: git-fixes

The fence only tracks whether the HuC load is in progress; it does not
distinguish between already loaded, not supported or disabled, so we
can always initialize it to completed, no matter the actual HuC
support. We already do that for most platforms, but we skip it on GTs
that lack VCS engines (e.g. the MTL root GT), so fix that. Note that
the cleanup is already unconditional.

While at it, move the init/fini to helper functions.

Fixes: 8e5f37828145 ("drm/i915/huc: fix leak of debug object in huc load fence on driver unload")
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221123235417.1475709-1-daniele.ceraolospurio@intel.com
(cherry picked from commit 14347a9c889fbdbae81e500f6c6e313f5d8e5271)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Takashi Iwai <tiwai@suse.de>

---
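For context, the mechanism the patch relies on is a completion-style
fence that starts out signaled, so that waiters only block while a
delayed HuC load is actually in flight. Below is a minimal userspace
sketch of that pattern, with purely illustrative names (demo_fence and
friends are not the i915_sw_fence API):

#include <pthread.h>
#include <stdbool.h>

struct demo_fence {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

/* Start out signaled: with no delayed load in flight there is nothing
 * to wait for, mirroring how the patch commits the fence right after
 * i915_sw_fence_init() in delayed_huc_load_init(). */
static void demo_fence_init_completed(struct demo_fence *f)
{
	pthread_mutex_init(&f->lock, NULL);
	pthread_cond_init(&f->cond, NULL);
	f->done = true;
}

/* Re-arm only when a delayed load actually starts. */
static void demo_fence_reset(struct demo_fence *f)
{
	pthread_mutex_lock(&f->lock);
	f->done = false;
	pthread_mutex_unlock(&f->lock);
}

/* Signal completion and wake any waiters. */
static void demo_fence_complete(struct demo_fence *f)
{
	pthread_mutex_lock(&f->lock);
	f->done = true;
	pthread_cond_broadcast(&f->cond);
	pthread_mutex_unlock(&f->lock);
}

/* Waiters block here only while a load is in progress. */
static void demo_fence_wait(struct demo_fence *f)
{
	pthread_mutex_lock(&f->lock);
	while (!f->done)
		pthread_cond_wait(&f->cond, &f->lock);
	pthread_mutex_unlock(&f->lock);
}

Because the fence begins in the completed state, a waiter that never
sees a delayed load simply falls through, which is why
delayed_huc_load_init() can be called unconditionally from
intel_huc_init_early().
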
 drivers/gpu/drm/i915/gt/uc/intel_huc.c |   47 +++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 13 deletions(-)

--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -211,12 +211,45 @@ void intel_huc_unregister_gsc_notifier(s
 	huc->delayed_load.nb.notifier_call = NULL;
 }
 
+static void delayed_huc_load_init(struct intel_huc *huc)
+{
+	/*
+	 * Initialize fence to be complete as this is expected to be complete
+	 * unless there is a delayed HuC load in progress.
+	 */
+	i915_sw_fence_init(&huc->delayed_load.fence,
+			   sw_fence_dummy_notify);
+	i915_sw_fence_commit(&huc->delayed_load.fence);
+
+	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+}
+
+static void delayed_huc_load_fini(struct intel_huc *huc)
+{
+	/*
+	 * the fence is initialized in init_early, so we need to clean it up
+	 * even if HuC loading is off.
+	 */
+	delayed_huc_load_complete(huc);
+	i915_sw_fence_fini(&huc->delayed_load.fence);
+}
+
 void intel_huc_init_early(struct intel_huc *huc)
 {
 	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
 
 	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
 
+	/*
+	 * we always init the fence as already completed, even if HuC is not
+	 * supported. This way we don't have to distinguish between HuC not
+	 * supported/disabled or already loaded, and can focus on if the load
+	 * is currently in progress (fence not complete) or not, which is what
+	 * we care about for stalling userspace submissions.
+	 */
+	delayed_huc_load_init(huc);
+
 	if (GRAPHICS_VER(i915) >= 11) {
 		huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
 		huc->status.mask = HUC_LOAD_SUCCESSFUL;
@@ -226,17 +259,6 @@ void intel_huc_init_early(struct intel_h
 		huc->status.mask = HUC_FW_VERIFIED;
 		huc->status.value = HUC_FW_VERIFIED;
 	}
-
-	/*
-	 * Initialize fence to be complete as this is expected to be complete
-	 * unless there is a delayed HuC reload in progress.
-	 */
-	i915_sw_fence_init(&huc->delayed_load.fence,
-			   sw_fence_dummy_notify);
-	i915_sw_fence_commit(&huc->delayed_load.fence);
-
-	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
 }
 
 #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
@@ -304,8 +326,7 @@ void intel_huc_fini(struct intel_huc *hu
 	 * the fence is initialized in init_early, so we need to clean it up
 	 * even if HuC loading is off.
 	 */
-	delayed_huc_load_complete(huc);
-	i915_sw_fence_fini(&huc->delayed_load.fence);
+	delayed_huc_load_fini(huc);
 
 	if (intel_uc_fw_is_loadable(&huc->fw))
 		intel_uc_fw_fini(&huc->fw);