From: Zhenyu Wang <zhenyuw@linux.intel.com>
Date: Thu, 30 Aug 2018 11:33:43 +0800
Subject: drm/i915/gvt: Give new born vGPU higher scheduling chance
Git-commit: 54ff01fd0d44b9681615f77c15fe9ea6dfadb501
Patch-mainline: v4.19-rc3
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

This tries to give a newly created vGPU a higher scheduling chance:
it is not only added at the head of the sched list, but also given
higher priority for workload scheduling for 2 seconds after it starts
being scheduled. This allows fast GPU execution during VM boot and
ensures the guest driver can set up the required state in time.
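
As an illustration of the selection logic, here is a minimal
standalone C sketch (names such as sched_entry, now_ns() and
PRI_WINDOW_NS are hypothetical stand-ins, not the real GVT-g
structures shown in the diff below):

  /* Userspace model of the 2 s priority window; illustration only. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <time.h>

  #define PRI_WINDOW_NS 2000000000LL /* 2 s, as GVT_SCHED_VGPU_PRI_TIME */

  struct sched_entry {
          const char *name;
          bool pri_sched;      /* still inside the priority window? */
          int64_t pri_time_ns; /* absolute deadline of the window */
          int64_t left_ts_ns;  /* remaining normal time slice */
  };

  static int64_t now_ns(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
  }

  /* Arm the priority window when a vGPU starts being scheduled. */
  static void start_schedule(struct sched_entry *e)
  {
          e->pri_time_ns = now_ns() + PRI_WINDOW_NS;
          e->pri_sched = true;
  }

  /* Priority-window entries win over the normal time-slice scan. */
  static struct sched_entry *pick_next(struct sched_entry *runq, int n)
  {
          for (int i = 0; i < n; i++) {
                  struct sched_entry *e = &runq[i];

                  if (e->pri_sched) {
                          if (now_ns() < e->pri_time_ns)
                                  return e; /* newborn goes first */
                          e->pri_sched = false; /* window expired */
                  }
                  if (e->left_ts_ns > 0)
                          return e; /* normal time-slice selection */
          }
          return NULL;
  }

  int main(void)
  {
          /* new-vgpu sits at the head (as list_add() would place it)
           * and has no time slice left; the open window still wins. */
          struct sched_entry q[2] = {
                  { .name = "new-vgpu", .left_ts_ns = 0 },
                  { .name = "old-vgpu", .left_ts_ns = 1000000 },
          };

          start_schedule(&q[0]);
          printf("picked: %s\n", pick_next(q, 2)->name);
          return 0;
  }

The real scheduler combines both mechanisms: list_add() places the
newborn vGPU at the head of the run queue, and the pri_sched window
lets find_busy_vgpu() pick it even when its time slice is exhausted.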

This fixes a recent failure seen on one VM when multiple Linux VMs
run on a kernel containing commit 2621cefaa42b3 ("drm/i915: Provide a
timeout to i915_gem_wait_for_idle() on setup"), whose shorter setup
timeout caused context state initialization to fail.

v2: change the higher-priority scheduling period to 2 seconds

Cc: Yuan Hang <hang.yuan@intel.com>
Reviewed-by: Hang Yuan <hang.yuan@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c |   34 +++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)

--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(st
 	return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
 	bool active;
-
+	bool pri_sched;
+	ktime_t pri_time;
 	ktime_t sched_in_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
+		if (vgpu_data->pri_sched) {
+			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+				vgpu = vgpu_data->vgpu;
+				break;
+			} else
+				vgpu_data->pri_sched = false;
+		}
+
 		/* Return the vGPU only if it has time slice left */
 		if (vgpu_data->left_ts > 0) {
 			vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sc
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
+
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sc
 	vgpu = find_busy_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
-
-		/* Move the last used vGPU to the tail of lru_list */
 		vgpu_data = vgpu->sched_data;
-		list_del_init(&vgpu_data->lru_list);
-		list_add_tail(&vgpu_data->lru_list,
-				&sched_data->lru_runq_head);
+		if (!vgpu_data->pri_sched) {
+			/* Move the last used vGPU to the tail of lru_list */
+			list_del_init(&vgpu_data->lru_list);
+			list_add_tail(&vgpu_data->lru_list,
+				      &sched_data->lru_runq_head);
+		}
 	} else {
 		scheduler->next_vgpu = gvt->idle_vgpu;
 	}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(str
 {
 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	ktime_t now;
 
 	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+	now = ktime_get();
+	vgpu_data->pri_time = ktime_add(now,
+					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+	vgpu_data->pri_sched = true;
+
+	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),