From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Date: Fri, 21 Apr 2017 00:33:55 +0300
Subject: drm: omapdrm: Rename GEM DMA sync functions
Git-commit: d61ce7da02a3c52317474f2a15dd610ec652d513
Patch-mainline: v4.13-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

The omap_gem_cpu_sync() function operates at a page level, while the
omap_gem_dma_sync() function operates at a buffer level. Rename them to
omap_gem_cpu_sync_page() and omap_gem_dma_sync_buffer() respectively to
avoid confusion.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/omapdrm/omap_drv.h        |    4 ++--
 drivers/gpu/drm/omapdrm/omap_fb.c         |    2 +-
 drivers/gpu/drm/omapdrm/omap_gem.c        |    6 +++---
 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c |    6 +++---
 4 files changed, 9 insertions(+), 9 deletions(-)

--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -185,8 +185,8 @@ int omap_gem_mmap_obj(struct drm_gem_obj
 		struct vm_area_struct *vma);
 int omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
-void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
-void omap_gem_dma_sync(struct drm_gem_object *obj,
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 		enum dma_data_direction dir);
 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
 void omap_gem_unpin(struct drm_gem_object *obj);
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -262,7 +262,7 @@ int omap_framebuffer_pin(struct drm_fram
 		ret = omap_gem_pin(plane->bo, &plane->dma_addr);
 		if (ret)
 			goto fail;
-		omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+		omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
 	}
 
 	omap_fb->pin_count++;
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -388,7 +388,7 @@ static int fault_1d(struct drm_gem_objec
 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (omap_obj->pages) {
-		omap_gem_cpu_sync(obj, pgoff);
+		omap_gem_cpu_sync_page(obj, pgoff);
 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 	} else {
 		BUG_ON(!is_contiguous(omap_obj));
@@ -734,7 +734,7 @@ static inline bool is_cached_coherent(st
 /* Sync the buffer for CPU access.. note pages should already be
  * attached, ie. omap_gem_get_pages()
  */
-void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
 {
 	struct drm_device *dev = obj->dev;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -747,7 +747,7 @@ void omap_gem_cpu_sync(struct drm_gem_ob
 }
 
 /* sync the buffer for DMA access */
-void omap_gem_dma_sync(struct drm_gem_object *obj,
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
 		enum dma_data_direction dir)
 {
 	struct drm_device *dev = obj->dev;
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -55,7 +55,7 @@ static struct sg_table *omap_gem_map_dma
 	sg_dma_address(sg->sgl) = dma_addr;
 
 	/* this must be after omap_gem_pin() to ensure we have pages attached */
-	omap_gem_dma_sync(obj, dir);
+	omap_gem_dma_sync_buffer(obj, dir);
 
 	return sg;
 out:
@@ -112,7 +112,7 @@ static void *omap_gem_dmabuf_kmap_atomic
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
 	omap_gem_get_pages(obj, &pages, false);
-	omap_gem_cpu_sync(obj, page_num);
+	omap_gem_cpu_sync_page(obj, page_num);
 	return kmap_atomic(pages[page_num]);
 }
 
@@ -128,7 +128,7 @@ static void *omap_gem_dmabuf_kmap(struct
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
 	omap_gem_get_pages(obj, &pages, false);
-	omap_gem_cpu_sync(obj, page_num);
+	omap_gem_cpu_sync_page(obj, page_num);
 	return kmap(pages[page_num]);
 }