From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Fri, 13 Jul 2018 15:21:14 +0530
Subject: drm/scheduler: modify args of drm_sched_entity_init
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: aa16b6c6b4d979234f830a48add47d02c12bb569
Patch-mainline: v4.19-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Replace the single run queue parameter with a list of run queues, and
drop the sched parameter, since the scheduler can be derived from the
run queue itself.
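
For callers the conversion is mechanical; a minimal sketch of the old
and new calls (here "ring", "entity" and "r" stand in for a driver's
own objects):

	struct drm_sched_rq *rq =
		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* Before: scheduler passed explicitly, single run queue. */
	r = drm_sched_entity_init(&ring->sched, &entity, rq, NULL);

	/*
	 * After: pointer to an array of run queues plus its length;
	 * the scheduler is taken from rq_list[0].
	 */
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);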

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |    4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |    3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |    4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |    3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |    3 +--
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     |    4 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     |    4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |    8 ++++----
 drivers/gpu/drm/scheduler/gpu_scheduler.c |   20 ++++++++++++--------
 drivers/gpu/drm/v3d/v3d_drv.c             |    7 +++----
 include/drm/gpu_scheduler.h               |    6 +++---
 11 files changed, 33 insertions(+), 33 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
 
-		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, &ctx->guilty);
+		r = drm_sched_entity_init(&ctx->rings[i].entity,
+					  &rq, 1, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1919,8 +1919,7 @@ void amdgpu_ttm_set_buffer_funcs_status(
 
 		ring = adev->mman.buffer_funcs_ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_dev
 
 		ring = &adev->uvd.inst[j].ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+					  1, NULL);
 		if (r != 0) {
 			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
 			return r;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_dev
 
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2399,8 +2399,7 @@ int amdgpu_vm_init(struct amdgpu_device
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.inst->ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		ring = &adev->uvd.inst[j].ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
 			return r;
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_devic
 
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
+		struct drm_sched_rq *rq;
 
 		if (gpu) {
-			drm_sched_entity_init(&gpu->sched,
-				&ctx->sched_entity[i],
-				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				NULL);
+			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+			drm_sched_entity_init(&ctx->sched_entity[i],
+					      &rq, 1, NULL);
 			}
 	}
 
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sc
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queues on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
+ * Note: the rq_list should have at least one element to schedule
+ *       the entity
+ *
  * Returns 0 on success or a negative error code on failure.
 */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
-	if (!(sched && entity && rq))
+	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
-	entity->rq = rq;
-	entity->sched = sched;
+	entity->rq = rq_list[0];
+	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
 
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
+	struct drm_sched_rq *rq;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		drm_sched_entity_init(&v3d->queue[i].sched,
-				      &v3d_priv->sched_entity[i],
-				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      NULL);
+		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_schedu
 		   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity, long timeout);