From d0302e74003bf1f0fc41c06948b745204c4704ea Mon Sep 17 00:00:00 2001
From: Ping Gao <ping.a.gao@intel.com>
Date: Thu, 29 Jun 2017 12:22:43 +0800
Subject: [PATCH] drm/i915/gvt: Audit and shadow workload during ELSP writing
Git-commit: d0302e74003bf1f0fc41c06948b745204c4704ea
Patch-mainline: v4.14-rc1
References: FATE#322643 bsc#1055900

Perform the workload audit and shadow ahead of vGPU scheduling; this
eliminates GPU idle time and improves performance in the multi-VM case.

The performance of Heaven running simultaneously in 3 VMs improves
by 20% with this patch.

v2: Remove the condition current->vgpu == vgpu when shadowing during
ELSP writing.
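
For reference, a minimal user-space sketch of the idea (not the driver
code: the struct, queue check and function names below are simplified
stand-ins for intel_vgpu_workload, workload_q_head() and
intel_gvt_scan_and_shadow_workload(), and the struct_mutex locking is
omitted):

    /*
     * Sketch only: models the "shadow once, as early as possible"
     * pattern introduced by this patch with hypothetical types.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct workload {
    	bool shadowed;	/* set once the audit/shadow step is done */
    };

    /* stands in for intel_gvt_scan_and_shadow_workload() */
    static int scan_and_shadow(struct workload *w)
    {
    	if (w->shadowed)	/* guard added by the patch: run only once */
    		return 0;
    	/* ... audit the ring buffer and populate the shadow context ... */
    	w->shadowed = true;
    	return 0;
    }

    /* stands in for submit_context(): shadow at ELSP write time only when
     * the queue is empty, since there is a single pre-allocated shadow
     * buffer object per ring. */
    static void submit(struct workload *w, int queue_len)
    {
    	w->shadowed = false;
    	if (queue_len == 0)
    		scan_and_shadow(w);
    	/* queue_workload(w); the scheduler later calls scan_and_shadow()
    	 * again, which now returns immediately thanks to the flag. */
    }

    int main(void)
    {
    	struct workload w;

    	submit(&w, 0);
    	printf("shadowed at submit: %d\n", w.shadowed);
    	return 0;
    }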

Signed-off-by: Ping Gao <ping.a.gao@intel.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Acked-by: Takashi Iwai <tiwai@suse.de>

---
 drivers/gpu/drm/i915/gvt/execlist.c  |   11 +++++++++++
 drivers/gpu/drm/i915/gvt/scheduler.c |    7 +++++++
 drivers/gpu/drm/i915/gvt/scheduler.h |    1 +
 3 files changed, 19 insertions(+)

--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -622,6 +622,7 @@ static int submit_context(struct intel_v
 	struct list_head *q = workload_q_head(vgpu, ring_id);
 	struct intel_vgpu_workload *last_workload = get_last_workload(q);
 	struct intel_vgpu_workload *workload = NULL;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
 	int ret;
@@ -685,6 +686,7 @@ static int submit_context(struct intel_v
 	workload->complete = complete_execlist_workload;
 	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
+	workload->shadowed = false;
 
 	if (ring_id == RCS) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -718,6 +720,15 @@ static int submit_context(struct intel_v
 		return ret;
 	}
 
+	/* Only scan and shadow the first workload in the queue
+	 * as there is only one pre-allocated buf-obj for shadow.
+	 */
+	if (list_empty(workload_q_head(vgpu, ring_id))) {
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_gvt_scan_and_shadow_workload(workload);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+	}
+
 	queue_workload(workload);
 	return 0;
 }
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -201,6 +201,9 @@ int intel_gvt_scan_and_shadow_workload(s
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ret;
 
+	if (workload->shadowed)
+		return 0;
+
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -228,6 +231,10 @@ int intel_gvt_scan_and_shadow_workload(s
 	}
 
 	ret = populate_shadow_context(workload);
+	if (ret)
+		goto out;
+
+	workload->shadowed = true;
 
 out:
 	return ret;
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
 	struct drm_i915_gem_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadowed;
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;