From: Thomas Hellstrom <thellstrom@vmware.com>
Date: Tue, 16 Jan 2018 09:12:05 +0100
Subject: drm/ttm: Export the ttm_k[un]map_atomic_prot API.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 9c11fcf1a74d338774faa059b0aea24264c95658
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

It will be used by the vmwgfx CPU blit code.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
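As a rough illustration, a caller such as the vmwgfx CPU blit could use the
exported pair along these lines (the helper name and its arguments below are
hypothetical, not taken from this patch; the only requirements the API imposes
are passing the same prot to the unmap call and unmapping in reverse order):

#include <linux/highmem.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>

/*
 * Hypothetical helper: copy one page to another using the
 * caller-supplied page protection for both temporary mappings.
 */
static void example_copy_page_prot(struct page *dst, struct page *src,
				   pgprot_t prot)
{
	void *dst_va = ttm_kmap_atomic_prot(dst, prot);
	void *src_va = ttm_kmap_atomic_prot(src, prot);

	memcpy(dst_va, src_va, PAGE_SIZE);

	/* Unmap in the reverse order of mapping, as the kerneldoc requires. */
	ttm_kunmap_atomic_prot(src_va, prot);
	ttm_kunmap_atomic_prot(dst_va, prot);
}
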
 drivers/gpu/drm/ttm/ttm_bo_util.c |   31 ++++++++++++++++++++++++++-----
 include/drm/ttm/ttm_bo_api.h      |    4 ++++
 2 files changed, 30 insertions(+), 5 deletions(-)

--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -263,24 +263,45 @@ static int ttm_copy_io_page(void *dst, v
 #define __ttm_kunmap_atomic(__addr) vunmap(__addr)
 #endif
 
-static void *ttm_kmap_atomic_prot(struct page *page,
-				  pgprot_t prot)
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		return kmap_atomic(page);
 	else
 		return __ttm_kmap_atomic_prot(page, prot);
 }
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
 
-
-static void ttm_kunmap_atomic_prot(void *addr,
-				   pgprot_t prot)
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		kunmap_atomic(addr);
 	else
 		__ttm_kunmap_atomic(addr);
 }
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -709,6 +709,10 @@ int ttm_fbdev_mmap(struct vm_area_struct
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev);
 
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+
 /**
  * ttm_bo_io
  *