From: Zhi Wang <zhi.a.wang@intel.com>
Date: Wed, 7 Feb 2018 18:12:15 +0800
Subject: drm/i915/gvt: Update PDPs after a vGPU mm object is pinned.
Git-commit: b20c0d5ce1047ba03a6709a07f31f4d7178de35c
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

The PDPs of a shadow page are only valid after the vGPU mm object is
pinned, so the PDPs in the shadow context should be updated at that
point, rather than when the shadow context is populated.
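
With this patch, the flow in prepare_workload() becomes roughly the
following (a simplified sketch of the resulting code path; the pin
call itself appears only as context in the last hunk below):

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret)
		return ret;

	/* shadow_mm is pinned, so its shadow PDPs are valid now */
	update_shadow_pdps(workload);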

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c |   28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	if (WARN_ON(!workload->shadow_mm))
+		return;
+
+	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+		return;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	set_context_pdp_root_pointer(shadow_ring_context,
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+	kunmap(page);
+}
+
 /*
  * when populating shadow ctx from guest, we should not overrride oa related
  * registers, so that they will not be overlapped by guest oa configs. Thus
@@ -161,9 +184,6 @@ static int populate_shadow_context(struc
 	}
 #undef COPY_REG
 
-	set_context_pdp_root_pointer(shadow_ring_context,
-				     (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
-
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
 			sizeof(*shadow_ring_context),
@@ -570,6 +590,8 @@ static int prepare_workload(struct intel
 		return ret;
 	}
 
+	update_shadow_pdps(workload);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");