From: Jordan Crouse
Date: Fri, 20 Oct 2017 11:06:56 -0600
Subject: drm/msm: Move memptrs to msm_gpu
Git-commit: cd414f3d931687eb1ebeb87533d85537e315f195
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

When we move to multiple ringbuffers we're going to store the data in the
memptrs on a per-ring basis. In order to prepare for that, move the current
memptrs from the adreno namespace into msm_gpu. This is way cleaner and
immediately lets us kill off some sub functions so there is much less cost
later when we do move to per-ring structs.

Signed-off-by: Jordan Crouse
Signed-off-by: Rob Clark
Acked-by: Petr Tesarik
---
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c   |  1 -
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c   |  1 -
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c   |  8 +---
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 55 ++++++--------------------
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 16 ---------
 drivers/gpu/drm/msm/msm_gpu.c           | 29 +++++++++++++++-
 drivers/gpu/drm/msm/msm_gpu.h           | 17 ++++++++-
 7 files changed, 56 insertions(+), 71 deletions(-)

--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -444,7 +444,6 @@ static const struct adreno_gpu_funcs fun
 		.pm_suspend = msm_gpu_pm_suspend,
 		.pm_resume = msm_gpu_pm_resume,
 		.recover = a3xx_recover,
-		.last_fence = adreno_last_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,
 		.irq = a3xx_irq,
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -532,7 +532,6 @@ static const struct adreno_gpu_funcs fun
 		.pm_suspend = a4xx_pm_suspend,
 		.pm_resume = a4xx_pm_resume,
 		.recover = a4xx_recover,
-		.last_fence = adreno_last_fence,
 		.submit = adreno_submit,
 		.flush = adreno_flush,
 		.irq = a4xx_irq,
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -116,7 +116,6 @@ out:
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)
 {
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->rb;
 	unsigned int i, ibs = 0;
@@ -143,8 +142,8 @@ static void a5xx_submit(struct msm_gpu *
 
 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
 	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
-	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
-	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
+	OUT_RING(ring, lower_32_bits(rbmemptr(gpu, fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(gpu, fence)));
 	OUT_RING(ring, submit->fence->seqno);
 
 	gpu->funcs->flush(gpu);
@@ -821,7 +820,7 @@ static void a5xx_fault_detect_irq(struct
 	struct msm_drm_private *priv = dev->dev_private;
 
 	dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-		gpu->funcs->last_fence(gpu),
+		gpu->memptrs->fence,
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
 		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
 		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
@@ -1009,7 +1008,6 @@ static const struct adreno_gpu_funcs fun
 		.pm_suspend = a5xx_pm_suspend,
 		.pm_resume = a5xx_pm_resume,
 		.recover = a5xx_recover,
-		.last_fence = adreno_last_fence,
 		.submit = a5xx_submit,
 		.flush = adreno_flush,
 		.irq = a5xx_irq,
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -182,8 +182,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
 	gpu->rb->cur = gpu->rb->start;
 
 	/* reset completed fence seqno: */
-	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
-	adreno_gpu->memptrs->rptr = 0;
+	gpu->memptrs->fence = gpu->fctx->completed_fence;
+	gpu->memptrs->rptr = 0;
 
 	/* Setup REG_CP_RB_CNTL: */
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
@@ -198,8 +198,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	if (!adreno_is_a430(adreno_gpu)) {
 		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-			rbmemptr(adreno_gpu, rptr));
+			REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu, rptr));
 	}
 
 	return 0;
@@ -213,17 +212,13 @@ static uint32_t get_wptr(struct msm_ring
 /* Use this helper to read rptr, since a430 doesn't update rptr in memory */
 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
 {
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
 	if (adreno_is_a430(adreno_gpu))
-		return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+		return gpu->memptrs->rptr = adreno_gpu_read(
 			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
 	else
-		return adreno_gpu->memptrs->rptr;
-}
-
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	return adreno_gpu->memptrs->fence;
+		return gpu->memptrs->rptr;
 }
 
 void adreno_recover(struct msm_gpu *gpu)
@@ -288,7 +283,7 @@ void adreno_submit(struct msm_gpu *gpu,
 
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 	OUT_RING(ring, CACHE_FLUSH_TS);
-	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+	OUT_RING(ring, rbmemptr(gpu, fence));
 	OUT_RING(ring, submit->fence->seqno);
 
 	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -361,7 +356,7 @@ void adreno_show(struct msm_gpu *gpu, st
 			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 			adreno_gpu->rev.patchid);
 
-	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
+	seq_printf(m, "fence: %d/%d\n", gpu->memptrs->fence,
 			gpu->fctx->last_fence);
 	seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
 	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -396,7 +391,7 @@ void adreno_dump_info(struct msm_gpu *gp
 			adreno_gpu->rev.major, adreno_gpu->rev.minor,
 			adreno_gpu->rev.patchid);
 
-	printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
+	printk("fence: %d/%d\n", gpu->memptrs->fence,
 			gpu->fctx->last_fence);
 	printk("rptr: %d\n", get_rptr(adreno_gpu));
 	printk("rb wptr: %d\n", get_wptr(gpu->rb));
@@ -443,7 +438,6 @@ int adreno_gpu_init(struct drm_device *d
 	struct adreno_platform_config *config = pdev->dev.platform_data;
 	struct msm_gpu_config adreno_gpu_config = { 0 };
 	struct msm_gpu *gpu = &adreno_gpu->base;
-	int ret;
 
 	adreno_gpu->funcs = funcs;
 	adreno_gpu->info = adreno_info(config->rev);
@@ -472,39 +466,14 @@ int adreno_gpu_init(struct drm_device *d
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
-	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
 			adreno_gpu->info->name, &adreno_gpu_config);
-	if (ret)
-		return ret;
-
-	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
-		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
-		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
-
-	if (IS_ERR(adreno_gpu->memptrs)) {
-		ret = PTR_ERR(adreno_gpu->memptrs);
-		adreno_gpu->memptrs = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-	}
-
-	return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if (adreno_gpu->memptrs_bo) {
-		if (adreno_gpu->memptrs)
-			msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
-
-		if (adreno_gpu->memptrs_iova)
-			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
-
-		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
-	}
 	release_firmware(adreno_gpu->pm4);
 	release_firmware(adreno_gpu->pfp);
 
-	msm_gpu_cleanup(gpu);
+	msm_gpu_cleanup(&adreno_gpu->base);
 }
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -82,14 +82,6 @@ struct adreno_info {
 
 const struct adreno_info *adreno_info(struct adreno_rev rev);
 
-#define rbmemptr(adreno_gpu, member) \
-	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-struct adreno_rbmemptrs {
-	volatile uint32_t rptr;
-	volatile uint32_t fence;
-};
-
 struct adreno_gpu {
 	struct msm_gpu base;
 	struct adreno_rev rev;
@@ -125,13 +117,6 @@ struct adreno_gpu {
 	/* firmware: */
 	const struct firmware *pm4, *pfp;
 
-	/* ringbuffer rptr/wptr: */
-	// TODO should this be in msm_ringbuffer? I think it would be
-	// different for z180..
-	struct adreno_rbmemptrs *memptrs;
-	struct drm_gem_object *memptrs_bo;
-	uint64_t memptrs_iova;
-
 	/*
 	 * Register offsets are different between some GPUs.
 	 * GPU specific offsets will be exported by GPU specific
@@ -220,7 +205,6 @@ int adreno_get_param(struct msm_gpu *gpu
 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
 		const char *fwname);
 int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
 void adreno_recover(struct msm_gpu *gpu);
 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx);
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -228,7 +228,7 @@ static void recover_worker(struct work_s
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
 	struct msm_gem_submit *submit;
-	uint32_t fence = gpu->funcs->last_fence(gpu);
+	uint32_t fence = gpu->memptrs->fence;
 
 	msm_update_fence(gpu->fctx, fence + 1);
 
@@ -281,7 +281,7 @@ static void hangcheck_handler(unsigned l
 	struct msm_gpu *gpu = (struct msm_gpu *)data;
 	struct drm_device *dev = gpu->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	uint32_t fence = gpu->funcs->last_fence(gpu);
+	uint32_t fence = gpu->memptrs->fence;
 
 	if (fence != gpu->hangcheck_fence) {
 		/* some progress has been made.. ya! */
@@ -449,7 +449,7 @@ static void retire_worker(struct work_st
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
 	struct drm_device *dev = gpu->dev;
-	uint32_t fence = gpu->funcs->last_fence(gpu);
+	uint32_t fence = gpu->memptrs->fence;
 
 	msm_update_fence(gpu->fctx, fence);
 
@@ -689,6 +689,17 @@ int msm_gpu_init(struct drm_device *drm,
 		goto fail;
 	}
 
+	gpu->memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+		&gpu->memptrs_iova);
+
+	if (IS_ERR(gpu->memptrs)) {
+		ret = PTR_ERR(gpu->memptrs);
+		gpu->memptrs = NULL;
+		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+		goto fail;
+	}
+
 	/* Create ringbuffer: */
 	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
 	if (IS_ERR(gpu->rb)) {
@@ -701,6 +712,12 @@ int msm_gpu_init(struct drm_device *drm,
 	return 0;
 
 fail:
+	if (gpu->memptrs_bo) {
+		msm_gem_put_vaddr(gpu->memptrs_bo);
+		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+	}
+
 	platform_set_drvdata(pdev, NULL);
 	return ret;
 }
@@ -719,6 +736,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
+	if (gpu->memptrs_bo) {
+		msm_gem_put_vaddr(gpu->memptrs_bo);
+		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+	}
+
 	if (!IS_ERR_OR_NULL(gpu->aspace)) {
 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
 			NULL, 0);
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -59,7 +59,6 @@ struct msm_gpu_funcs {
 			struct msm_file_private *ctx);
 	void (*flush)(struct msm_gpu *gpu);
 	irqreturn_t (*irq)(struct msm_gpu *irq);
-	uint32_t (*last_fence)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
@@ -68,6 +67,14 @@ struct msm_gpu_funcs {
 #endif
 };
 
+#define rbmemptr(gpu, member) \
+	((gpu)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+
+struct msm_rbmemptrs {
+	volatile uint32_t rptr;
+	volatile uint32_t fence;
+};
+
 struct msm_gpu {
 	const char *name;
 	struct drm_device *dev;
@@ -130,11 +137,17 @@ struct msm_gpu {
 	struct work_struct recover_work;
 
 	struct list_head submit_list;
+
+	struct msm_rbmemptrs *memptrs;
+	struct drm_gem_object *memptrs_bo;
+	uint64_t memptrs_iova;
+
+
 };
 
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
+	return gpu->fctx->last_fence > gpu->memptrs->fence;
 }
 
 /* Perf-Counters: