From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 21 Feb 2018 20:34:13 +0100
Subject: drm/ttm: drop ttm->dummy_read_page
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 231cdafc75434015f3925d6662a1821fcfef16b7
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

The dummy read page is only used by the AGP backend, and there it can
easily be accessed through ttm->bdev->glob.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
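All of the hunks below are the same mechanical change: the dummy_read_page
parameter is dropped from every ttm_tt_create()/ttm_tt_init()/ttm_dma_tt_init()
path, and the one remaining consumer, ttm_agp_bind(), looks the page up
through the BO device's global state instead. A minimal sketch of the new
lookup pattern follows; it is illustrative only, and the helper name
fill_missing_pages() is invented here rather than taken from the tree:

	/* Illustrative sketch: after this patch the AGP bind path fetches
	 * the dummy page from the global state reachable via ttm->bdev->glob
	 * instead of a per-ttm_tt field.
	 */
	static void fill_missing_pages(struct ttm_tt *ttm, struct agp_memory *mem)
	{
		struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
		unsigned long i;

		for (i = 0; i < ttm->num_pages; ++i) {
			struct page *page = ttm->pages[i];

			/* Unpopulated slots map to the shared dummy page. */
			if (!page)
				page = dummy_read_page;

			mem->pages[mem->page_count++] = page;
		}
	}
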
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     |    5 ++---
 drivers/gpu/drm/ast/ast_ttm.c               |    5 ++---
 drivers/gpu/drm/bochs/bochs_mm.c            |    5 ++---
 drivers/gpu/drm/cirrus/cirrus_ttm.c         |    5 ++---
 drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c |    5 ++---
 drivers/gpu/drm/mgag200/mgag200_ttm.c       |    5 ++---
 drivers/gpu/drm/nouveau/nouveau_bo.c        |    6 +++---
 drivers/gpu/drm/nouveau/nouveau_sgdma.c     |    5 ++---
 drivers/gpu/drm/nouveau/nouveau_ttm.h       |    3 +--
 drivers/gpu/drm/qxl/qxl_ttm.c               |    6 ++----
 drivers/gpu/drm/radeon/radeon_ttm.c         |    7 +++----
 drivers/gpu/drm/ttm/ttm_agp_backend.c       |    8 ++++----
 drivers/gpu/drm/ttm/ttm_bo.c                |    6 ++----
 drivers/gpu/drm/ttm/ttm_tt.c                |    8 ++------
 drivers/gpu/drm/virtio/virtgpu_ttm.c        |    6 ++----
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c      |    9 +++------
 include/drm/ttm/ttm_bo_driver.h             |   17 ++++-------------
 17 files changed, 40 insertions(+), 71 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -970,8 +970,7 @@ static struct ttm_backend_func amdgpu_ba
 };
 
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
-				    unsigned long size, uint32_t page_flags,
-				    struct page *dummy_read_page)
+				    unsigned long size, uint32_t page_flags)
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt;
@@ -984,7 +983,7 @@ static struct ttm_tt *amdgpu_ttm_tt_crea
 	}
 	gtt->ttm.ttm.func = &amdgpu_backend_func;
 	gtt->adev = adev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -200,8 +200,7 @@ static struct ttm_backend_func ast_tt_ba
 
 
 static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+				 unsigned long size, uint32_t page_flags)
 {
 	struct ttm_tt *tt;
 
@@ -209,7 +208,7 @@ static struct ttm_tt *ast_ttm_tt_create(
 	if (tt == NULL)
 		return NULL;
 	tt->func = &ast_tt_backend_func;
-	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_tt_init(tt, bdev, size, page_flags)) {
 		kfree(tt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -178,8 +178,7 @@ static struct ttm_backend_func bochs_tt_
 
 static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
 					  unsigned long size,
-					  uint32_t page_flags,
-					  struct page *dummy_read_page)
+					  uint32_t page_flags)
 {
 	struct ttm_tt *tt;
 
@@ -187,7 +186,7 @@ static struct ttm_tt *bochs_ttm_tt_creat
 	if (tt == NULL)
 		return NULL;
 	tt->func = &bochs_tt_backend_func;
-	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_tt_init(tt, bdev, size, page_flags)) {
 		kfree(tt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -200,8 +200,7 @@ static struct ttm_backend_func cirrus_tt
 
 
 static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+				 unsigned long size, uint32_t page_flags)
 {
 	struct ttm_tt *tt;
 
@@ -209,7 +208,7 @@ static struct ttm_tt *cirrus_ttm_tt_crea
 	if (tt == NULL)
 		return NULL;
 	tt->func = &cirrus_tt_backend_func;
-	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_tt_init(tt, bdev, size, page_flags)) {
 		kfree(tt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -202,8 +202,7 @@ static struct ttm_backend_func hibmc_tt_
 
 static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
 					  unsigned long size,
-					  u32 page_flags,
-					  struct page *dummy_read_page)
+					  u32 page_flags)
 {
 	struct ttm_tt *tt;
 	int ret;
@@ -214,7 +213,7 @@ static struct ttm_tt *hibmc_ttm_tt_creat
 		return NULL;
 	}
 	tt->func = &hibmc_tt_backend_func;
-	ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+	ret = ttm_tt_init(tt, bdev, size, page_flags);
 	if (ret) {
 		DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
 		kfree(tt);
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -200,8 +200,7 @@ static struct ttm_backend_func mgag200_t
 
 
 static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+				 unsigned long size, uint32_t page_flags)
 {
 	struct ttm_tt *tt;
 
@@ -209,7 +208,7 @@ static struct ttm_tt *mgag200_ttm_tt_cre
 	if (tt == NULL)
 		return NULL;
 	tt->func = &mgag200_tt_backend_func;
-	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_tt_init(tt, bdev, size, page_flags)) {
 		kfree(tt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -605,18 +605,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo,
 
 static struct ttm_tt *
 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-		      uint32_t page_flags, struct page *dummy_read)
+		      uint32_t page_flags)
 {
 #if IS_ENABLED(CONFIG_AGP)
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 
 	if (drm->agp.bridge) {
 		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
-					 page_flags, dummy_read);
+					 page_flags);
 	}
 #endif
 
-	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
+	return nouveau_sgdma_create_ttm(bdev, size, page_flags);
 }
 
 static int
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -82,8 +82,7 @@ static struct ttm_backend_func nv50_sgdm
 
 struct ttm_tt *
 nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
-			 unsigned long size, uint32_t page_flags,
-			 struct page *dummy_read_page)
+			 unsigned long size, uint32_t page_flags)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nouveau_sgdma_be *nvbe;
@@ -97,7 +96,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_d
 	else
 		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
 
-	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
+	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags))
 		/*
 		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
 		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -12,8 +12,7 @@ extern const struct ttm_mem_type_manager
 extern const struct ttm_mem_type_manager_func nv04_gart_manager;
 
 struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
-					unsigned long size, u32 page_flags,
-					struct page *dummy_read_page);
+					unsigned long size, u32 page_flags);
 
 int  nouveau_ttm_init(struct nouveau_drm *drm);
 void nouveau_ttm_fini(struct nouveau_drm *drm);
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -292,8 +292,7 @@ static struct ttm_backend_func qxl_backe
 };
 
 static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
-					unsigned long size, uint32_t page_flags,
-					struct page *dummy_read_page)
+					unsigned long size, uint32_t page_flags)
 {
 	struct qxl_device *qdev;
 	struct qxl_ttm_tt *gtt;
@@ -304,8 +303,7 @@ static struct ttm_tt *qxl_ttm_tt_create(
 		return NULL;
 	gtt->ttm.ttm.func = &qxl_backend_func;
 	gtt->qdev = qdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
-			    dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -687,8 +687,7 @@ static struct ttm_backend_func radeon_ba
 };
 
 static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
-				    unsigned long size, uint32_t page_flags,
-				    struct page *dummy_read_page)
+				    unsigned long size, uint32_t page_flags)
 {
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt;
@@ -697,7 +696,7 @@ static struct ttm_tt *radeon_ttm_tt_crea
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
 		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
-					 size, page_flags, dummy_read_page);
+					 size, page_flags);
 	}
 #endif
 
@@ -707,7 +706,7 @@ static struct ttm_tt *radeon_ttm_tt_crea
 	}
 	gtt->ttm.ttm.func = &radeon_backend_func;
 	gtt->rdev = rdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -50,6 +50,7 @@ struct ttm_agp_backend {
 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+	struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
 	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem;
 	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
@@ -64,7 +65,7 @@ static int ttm_agp_bind(struct ttm_tt *t
 		struct page *page = ttm->pages[i];
 
 		if (!page)
-			page = ttm->dummy_read_page;
+			page = dummy_read_page;
 
 		mem->pages[mem->page_count++] = page;
 	}
@@ -111,8 +112,7 @@ static struct ttm_backend_func ttm_agp_f
 
 struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 				 struct agp_bridge_data *bridge,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+				 unsigned long size, uint32_t page_flags)
 {
 	struct ttm_agp_backend *agp_be;
 
@@ -124,7 +124,7 @@ struct ttm_tt *ttm_agp_tt_create(struct
 	agp_be->bridge = bridge;
 	agp_be->ttm.func = &ttm_agp_func;
 
-	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags)) {
 		kfree(agp_be);
 		return NULL;
 	}
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -228,7 +228,6 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
 
@@ -247,14 +246,13 @@ static int ttm_bo_add_ttm(struct ttm_buf
 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 	case ttm_bo_type_kernel:
 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-						      page_flags, glob->dummy_read_page);
+						      page_flags);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
 	case ttm_bo_type_sg:
 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-						      page_flags | TTM_PAGE_FLAG_SG,
-						      glob->dummy_read_page);
+						      page_flags | TTM_PAGE_FLAG_SG);
 		if (unlikely(bo->ttm == NULL)) {
 			ret = -ENOMEM;
 			break;
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -191,14 +191,12 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 }
 
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags,
-		struct page *dummy_read_page)
+		unsigned long size, uint32_t page_flags)
 {
 	ttm->bdev = bdev;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
-	ttm->dummy_read_page = dummy_read_page;
 	ttm->state = tt_unpopulated;
 	ttm->swap_storage = NULL;
 
@@ -219,8 +217,7 @@ void ttm_tt_fini(struct ttm_tt *ttm)
 EXPORT_SYMBOL(ttm_tt_fini);
 
 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags,
-		struct page *dummy_read_page)
+		    unsigned long size, uint32_t page_flags)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 
@@ -228,7 +225,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *t
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
-	ttm->dummy_read_page = dummy_read_page;
 	ttm->state = tt_unpopulated;
 	ttm->swap_storage = NULL;
 
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -324,8 +324,7 @@ static struct ttm_backend_func virtio_gp
 
 static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
 					       unsigned long size,
-					       uint32_t page_flags,
-					       struct page *dummy_read_page)
+					       uint32_t page_flags)
 {
 	struct virtio_gpu_device *vgdev;
 	struct virtio_gpu_ttm_tt *gtt;
@@ -336,8 +335,7 @@ static struct ttm_tt *virtio_gpu_ttm_tt_
 		return NULL;
 	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
 	gtt->vgdev = vgdev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
-			    dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -694,8 +694,7 @@ static struct ttm_backend_func vmw_ttm_f
 };
 
 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+				 unsigned long size, uint32_t page_flags)
 {
 	struct vmw_ttm_tt *vmw_be;
 	int ret;
@@ -709,11 +708,9 @@ static struct ttm_tt *vmw_ttm_tt_create(
 	vmw_be->mob = NULL;
 
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
-				      dummy_read_page);
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags);
 	else
-		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
-				  dummy_read_page);
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags);
 	if (unlikely(ret != 0))
 		goto out_no_init;
 
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -100,7 +100,6 @@ enum ttm_caching_state {
  * @bdev: Pointer to a struct ttm_bo_device.
  * @func: Pointer to a struct ttm_backend_func that describes
  * the backend methods.
- * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
  * pointer.
  * @pages: Array of pages backing the data.
  * @num_pages: Number of pages in the page array.
@@ -118,7 +117,6 @@ enum ttm_caching_state {
 struct ttm_tt {
 	struct ttm_bo_device *bdev;
 	struct ttm_backend_func *func;
-	struct page *dummy_read_page;
 	struct page **pages;
 	uint32_t page_flags;
 	unsigned long num_pages;
@@ -331,7 +329,6 @@ struct ttm_bo_driver {
 	 * @bdev: pointer to a struct ttm_bo_device:
 	 * @size: Size of the data needed backing.
 	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
-	 * @dummy_read_page: See struct ttm_bo_device.
 	 *
 	 * Create a struct ttm_tt to back data with system memory pages.
 	 * No pages are actually allocated.
@@ -340,8 +337,7 @@ struct ttm_bo_driver {
 	 */
 	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
 					unsigned long size,
-					uint32_t page_flags,
-					struct page *dummy_read_page);
+					uint32_t page_flags);
 
 	/**
 	 * ttm_tt_populate
@@ -621,7 +617,6 @@ ttm_flag_masked(uint32_t *old, uint32_t
  * @bdev: pointer to a struct ttm_bo_device:
  * @size: Size of the data needed backing.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
  *
  * Create a struct ttm_tt to back data with system memory pages.
  * No pages are actually allocated.
@@ -629,11 +624,9 @@ ttm_flag_masked(uint32_t *old, uint32_t
  * NULL: Out of memory.
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags,
-		struct page *dummy_read_page);
+		unsigned long size, uint32_t page_flags);
 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
-		    unsigned long size, uint32_t page_flags,
-		    struct page *dummy_read_page);
+		    unsigned long size, uint32_t page_flags);
 
 /**
  * ttm_tt_fini
@@ -1080,7 +1073,6 @@ extern const struct ttm_mem_type_manager
  * @bridge: The agp bridge this device is sitting on.
  * @size: Size of the data needed backing.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- * @dummy_read_page: See struct ttm_bo_device.
  *
  *
  * Create a TTM backend that uses the indicated AGP bridge as an aperture
@@ -1089,8 +1081,7 @@ extern const struct ttm_mem_type_manager
  */
 struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 				 struct agp_bridge_data *bridge,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page);
+				 unsigned long size, uint32_t page_flags);
 int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif