From c9214008b52b20ba2a88dad6ba92f1a19c533db6 Mon Sep 17 00:00:00 2001
From: fred gao <fred.gao@intel.com>
Date: Tue, 19 Sep 2017 15:11:29 +0800
Subject: [PATCH] drm/i915/gvt: Add VM healthy check for submit_context
Git-commit: c9214008b52b20ba2a88dad6ba92f1a19c533db6
Patch-mainline: v4.16-rc1
References: FATE#322643 bsc#1055900

When a scan error occurs in submit_context, decrease the mm reference
count and free the workload struct before the workload is abandoned.
If the error indicates the VM is unhealthy, the execlists are cleaned
up and the vGPU enters failsafe mode.

V2:
- submit_context-related code should be combined together. (Zhenyu)

V3:
- free all the unsubmitted workloads. (Zhenyu)

V4:
- refine the cleanup path. (Zhenyu)

V5:
- polish the title. (Zhenyu)

Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Acked-by: Takashi Iwai <tiwai@suse.de>

---
 drivers/gpu/drm/i915/gvt/execlist.c |   27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -360,7 +360,6 @@ static int emulate_execlist_schedule_in(
 
 static void free_workload(struct intel_vgpu_workload *workload)
 {
-	intel_vgpu_unpin_mm(workload->shadow_mm);
 	intel_gvt_mm_unreference(workload->shadow_mm);
 	kmem_cache_free(workload->vgpu->submission.workloads, workload);
 }
@@ -546,7 +545,7 @@ static int complete_execlist_workload(st
 	struct intel_vgpu_workload *next_workload;
 	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
-	int ret;
+	int ret = 0;
 
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 			workload->status);
@@ -587,17 +586,12 @@ static int complete_execlist_workload(st
 
 	if (lite_restore) {
 		gvt_dbg_el("next context == current - no schedule-out\n");
-		free_workload(workload);
-		return 0;
+		goto out;
 	}
 
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
-	if (ret)
-		goto err;
 out:
-	free_workload(workload);
-	return 0;
-err:
+	intel_vgpu_unpin_mm(workload->shadow_mm);
 	free_workload(workload);
 	return ret;
 }
@@ -768,13 +762,22 @@ static int submit_context(struct intel_v
 	if (list_empty(workload_q_head(vgpu, ring_id))) {
 		intel_runtime_pm_get(dev_priv);
 		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_gvt_scan_and_shadow_workload(workload);
+		ret = intel_gvt_scan_and_shadow_workload(workload);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 		intel_runtime_pm_put(dev_priv);
 	}
 
-	queue_workload(workload);
-	return 0;
+	if (ret == 0)
+		queue_workload(workload);
+	else {
+		free_workload(workload);
+		if (vgpu_is_vm_unhealthy(ret)) {
+			intel_vgpu_clean_execlist(vgpu);
+			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+		}
+	}
+	return ret;
+
 }
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)