From: Lucas Stach
Date: Mon, 4 Dec 2017 19:24:06 +0100
Subject: drm/etnaviv: move dependency handling to scheduler
Git-commit: 683da226f88dde7bf68940c21418995b63baae2f
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Move the fence dependency handling to the scheduler where it belongs.
Jobs with unsignaled dependencies just get to sit in the scheduler
queue without holding any locks.

Signed-off-by: Lucas Stach
Acked-by: Petr Tesarik
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  3 +
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 40 ++++++++++++----------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        | 48 ---------------------------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |  3 -
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 45 +++++++++++++++++++++++++
 5 files changed, 70 insertions(+), 69 deletions(-)

--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -94,6 +94,9 @@ struct etnaviv_gem_submit_bo {
 	u32 flags;
 	struct etnaviv_gem_object *obj;
 	struct etnaviv_vram_mapping *mapping;
+	struct dma_fence *excl;
+	unsigned int nr_shared;
+	struct dma_fence **shared;
 };
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -170,29 +170,33 @@ fail:
 	return ret;
 }
 
-static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 {
-	unsigned int context = submit->gpu->fence_context;
 	int i, ret = 0;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
-		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
-
-		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
-						 explicit);
-		if (ret)
-			break;
-	}
+		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+		struct reservation_object *robj = bo->obj->resv;
+
+		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
+			ret = reservation_object_reserve_shared(robj);
+			if (ret)
+				return ret;
+		}
+
+		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
+			continue;
+
+		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
+			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+								&bo->nr_shared,
+								&bo->shared);
+			if (ret)
+				return ret;
+		} else {
+			bo->excl = reservation_object_get_excl_rcu(robj);
+		}
 
-	if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
-		/*
-		 * Wait if the fence is from a foreign context, or if the fence
-		 * array contains any fence from a foreign context.
-		 */
-		if (!dma_fence_match_context(submit->in_fence, context))
-			ret = dma_fence_wait(submit->in_fence, true);
 	}
 
 	return ret;
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1084,54 +1084,6 @@ static struct dma_fence *etnaviv_gpu_fen
 	return &f->base;
 }
 
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive, bool explicit)
-{
-	struct reservation_object *robj = etnaviv_obj->resv;
-	struct reservation_object_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	if (!exclusive) {
-		ret = reservation_object_reserve_shared(robj);
-		if (ret)
-			return ret;
-	}
-
-	if (explicit)
-		return 0;
-
-	/*
-	 * If we have any shared fences, then the exclusive fence
-	 * should be ignored as it will already have been signalled.
-	 */
-	fobj = reservation_object_get_list(robj);
-	if (!fobj || fobj->shared_count == 0) {
-		/* Wait on any existing exclusive fence which isn't our own */
-		fence = reservation_object_get_excl(robj);
-		if (fence && fence->context != context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(robj));
-		if (fence->context != context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 /*
  * event management:
  */
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -188,9 +188,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu
 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 #endif
 
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive, bool implicit);
-
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 	u32 fence, struct timespec *timeout);
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,6 +35,51 @@ struct etnaviv_gem_submit *to_etnaviv_su
 struct dma_fence *etnaviv_sched_dependency(struct drm_sched_job *sched_job,
 					   struct drm_sched_entity *entity)
 {
+	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+	struct dma_fence *fence;
+	int i;
+
+	if (unlikely(submit->in_fence)) {
+		fence = submit->in_fence;
+		submit->in_fence = NULL;
+
+		if (!dma_fence_is_signaled(fence))
+			return fence;
+
+		dma_fence_put(fence);
+	}
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+		int j;
+
+		if (bo->excl) {
+			fence = bo->excl;
+			bo->excl = NULL;
+
+			if (!dma_fence_is_signaled(fence))
+				return fence;
+
+			dma_fence_put(fence);
+		}
+
+		for (j = 0; j < bo->nr_shared; j++) {
+			if (!bo->shared[j])
+				continue;
+
+			fence = bo->shared[j];
+			bo->shared[j] = NULL;
+
+			if (!dma_fence_is_signaled(fence))
+				return fence;
+
+			dma_fence_put(fence);
+		}
+		kfree(bo->shared);
+		bo->nr_shared = 0;
+		bo->shared = NULL;
+	}
+
 	return NULL;
 }
 