From 21527a8dafc40fc499ae57492c1c5d0098cbcf08 Mon Sep 17 00:00:00 2001
From: Zhi Wang <zhi.a.wang@intel.com>
Date: Tue, 12 Sep 2017 21:42:09 +0800
Subject: [PATCH] drm/i915/gvt: Factor out vGPU workload creation/destroy
Git-commit: 21527a8dafc40fc499ae57492c1c5d0098cbcf08
Patch-mainline: v4.16-rc1
References: FATE#322643 bsc#1055900
Factor out vGPU workload creation/destroy functions since they are not
specific to execlist emulation.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Acked-by: Takashi Iwai <tiwai@suse.de>
---
drivers/gpu/drm/i915/gvt/execlist.c | 27 ++++--------------
drivers/gpu/drm/i915/gvt/scheduler.c | 51 +++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/scheduler.h | 5 +++
3 files changed, 62 insertions(+), 21 deletions(-)
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -358,12 +358,6 @@ static int emulate_execlist_schedule_in(
return 0;
}
-static void free_workload(struct intel_vgpu_workload *workload)
-{
- intel_gvt_mm_unreference(workload->shadow_mm);
- kmem_cache_free(workload->vgpu->submission.workloads, workload);
-}
-
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
@@ -592,7 +586,7 @@ static int complete_execlist_workload(st
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
intel_vgpu_unpin_mm(workload->shadow_mm);
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
return ret;
}
@@ -693,10 +687,6 @@ static int submit_context(struct intel_v
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
- workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
- if (!workload)
- return -ENOMEM;
-
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
@@ -705,13 +695,10 @@ static int submit_context(struct intel_v
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
- INIT_LIST_HEAD(&workload->list);
- INIT_LIST_HEAD(&workload->shadow_bb);
-
- init_waitqueue_head(&workload->shadow_ctx_status_wq);
- atomic_set(&workload->shadow_ctx_active, 0);
+ workload = intel_vgpu_create_workload(vgpu);
+ if (IS_ERR(workload))
+ return PTR_ERR(workload);
- workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
@@ -721,9 +708,7 @@ static int submit_context(struct intel_v
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
- workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
- workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -770,7 +755,7 @@ static int submit_context(struct intel_v
if (ret == 0)
queue_workload(workload);
else {
- free_workload(workload);
+ intel_vgpu_destroy_workload(workload);
if (vgpu_is_vm_unhealthy(ret)) {
intel_vgpu_clean_execlist(vgpu);
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
@@ -859,7 +844,7 @@ static void clean_workloads(struct intel
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
- free_workload(pos);
+ intel_vgpu_destroy_workload(pos);
}
clear_bit(engine->id, s->shadow_ctx_desc_updated);
}
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -819,3 +819,54 @@ out_shadow_ctx:
i915_gem_context_put(s->shadow_ctx);
return ret;
}
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the vGPU workload to be destroyed
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_gvt_mm_unreference(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+}
+
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or an ERR_PTR-encoded
+ * negative error code on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
+
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+}
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,11 @@ int intel_vgpu_setup_submission(struct i
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu);
+
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
int intel_gvt_generate_request(struct intel_vgpu_workload *workload);