From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Fri, 20 Jul 2018 17:51:05 +0530
Subject: drm/scheduler: modify API to avoid redundancy
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: cdc50176597cb44ce25eb7331c450058775b8d2a
Patch-mainline: v4.19-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

The entity already has a scheduler field, so we don't need the sched
argument in any of the functions where an entity is provided.
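
For a driver call site the change looks like this (a minimal sketch;
"ring", "entity" and "owner" stand in for whatever the driver already
has at hand, as in the amdgpu hunks below):

	/* before: scheduler passed explicitly alongside the entity */
	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);

	/* after: scheduler is taken from entity->sched internally */
	r = drm_sched_job_init(&job->base, entity, owner);

The same argument drop applies to drm_sched_entity_flush(),
drm_sched_entity_fini() and drm_sched_entity_destroy().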

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |    2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |   13 +++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |    2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |    3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |    3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |    2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |    4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |    3 +--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c   |    4 ++--
 drivers/gpu/drm/scheduler/gpu_scheduler.c |   20 +++++++++++---------
 drivers/gpu/drm/v3d/v3d_drv.c             |    4 +---
 drivers/gpu/drm/v3d/v3d_gem.c             |    2 --
 include/drm/gpu_scheduler.h               |   10 +++-------
 13 files changed, 30 insertions(+), 42 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgp
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu
 
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-				      &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-			&ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-					  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct a
 				continue;
 
 			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-					&ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
 		}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1926,8 +1926,7 @@ void amdgpu_ttm_set_buffer_funcs_status(
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 		dma_fence_put(man->move);
 		man->move = NULL;
 	}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_dev
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_dev
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2477,7 +2477,7 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2610,7 +2610,7 @@ void amdgpu_vm_fini(struct amdgpu_device
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
 
-			drm_sched_entity_destroy(&gpu->sched,
-						&ctx->sched_entity[i]);
+			drm_sched_entity_destroy(&ctx->sched_entity[i]);
 		}
 	}
 
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -142,8 +142,8 @@ int etnaviv_sched_push_job(struct drm_sc
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;
 
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_c
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-				struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-				      &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *d
 	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *d
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -286,12 +286,9 @@ int drm_sched_entity_init(struct drm_sch
 			  struct drm_sched_rq **rq_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,