From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 29 Jan 2018 10:28:40 +0000
Subject: drm/i915: Simplify guard logic for setup_scratch_page()
Git-commit: 7fb9ee5db24a5892d3af8487de39d7b4a7fc2ea8
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Older gcc complains that it can't follow the guards and thinks that
addr may be used uninitialised.

In the process, we can simplify the function down to a single loop; per bloat-o-meter:
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-131 (-131)
Function                                     old     new   delta
setup_scratch_page                           545     414    -131
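
As an illustration (not part of the patch), here is a minimal,
self-contained sketch of the guard shape that trips older gcc; the
helper and the values are hypothetical:

	/* Hypothetical demo of the pre-patch control flow. */
	extern void *try_alloc(void);

	int demo(int try_large)
	{
		void *page = NULL;
		int addr;

		if (try_large) {
			page = try_alloc();	/* may fail, like alloc_pages() */
			if (page)
				addr = 64;	/* assigned only under the guard */
		}

		if (!page)
			addr = 4;		/* fallback assignment */

		return addr;	/* older gcc: 'addr' may be used uninitialised */
	}

addr is in fact assigned on every path that reaches the return, but the
compiler cannot link the page check at the use back to the guarded
assignment, hence the false-positive warning.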

Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180129102840.19901-1-chris@chris-wilson.co.uk

Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c |   69 +++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 36 deletions(-)

--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -543,9 +543,7 @@ static void fill_page_dma_32(struct i915
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page = NULL;
-	dma_addr_t addr;
-	int order;
+	unsigned long size;
 
 	/*
 	 * In order to utilize 64K pages for an object with a size < 2M, we will
@@ -559,48 +557,47 @@ setup_scratch_page(struct i915_address_s
 	 * TODO: we should really consider write-protecting the scratch-page and
 	 * sharing between ppgtt
 	 */
+	size = I915_GTT_PAGE_SIZE_4K;
 	if (i915_vm_is_48bit(vm) &&
 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
-		order = get_order(I915_GTT_PAGE_SIZE_64K);
-		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
-		if (page) {
-			addr = dma_map_page(vm->dma, page, 0,
-					    I915_GTT_PAGE_SIZE_64K,
-					    PCI_DMA_BIDIRECTIONAL);
-			if (unlikely(dma_mapping_error(vm->dma, addr))) {
-				__free_pages(page, order);
-				page = NULL;
-			}
-
-			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
-				dma_unmap_page(vm->dma, addr,
-					       I915_GTT_PAGE_SIZE_64K,
-					       PCI_DMA_BIDIRECTIONAL);
-				__free_pages(page, order);
-				page = NULL;
-			}
-		}
+		size = I915_GTT_PAGE_SIZE_64K;
+		gfp |= __GFP_NOWARN;
 	}
+	gfp |= __GFP_ZERO | __GFP_REPEAT;
 
-	if (!page) {
-		order = 0;
-		page = alloc_page(gfp | __GFP_ZERO);
+	do {
+		int order = get_order(size);
+		struct page *page;
+		dma_addr_t addr;
+
+		page = alloc_pages(gfp, order);
 		if (unlikely(!page))
-			return -ENOMEM;
+			goto skip;
 
-		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+		addr = dma_map_page(vm->dma, page, 0, size,
 				    PCI_DMA_BIDIRECTIONAL);
-		if (unlikely(dma_mapping_error(vm->dma, addr))) {
-			__free_page(page);
-			return -ENOMEM;
-		}
-	}
+		if (unlikely(dma_mapping_error(vm->dma, addr)))
+			goto free_page;
+
+		if (unlikely(!IS_ALIGNED(addr, size)))
+			goto unmap_page;
 
-	vm->scratch_page.page = page;
-	vm->scratch_page.daddr = addr;
-	vm->scratch_page.order = order;
+		vm->scratch_page.page = page;
+		vm->scratch_page.daddr = addr;
+		vm->scratch_page.order = order;
+		return 0;
+
+unmap_page:
+		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+		__free_pages(page, order);
+skip:
+		if (size == I915_GTT_PAGE_SIZE_4K)
+			return -ENOMEM;
 
-	return 0;
+		size = I915_GTT_PAGE_SIZE_4K;
+		gfp &= ~__GFP_NOWARN;
+	} while (1);
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
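
The rewritten function follows the kernel's usual goto-unwind idiom
inside a retry loop. For readers unfamiliar with the shape, here is a
minimal standalone sketch; the helpers (acquire_a, acquire_b,
release_a) are hypothetical stand-ins for alloc_pages()/dma_map_page()
and their teardown, not i915 API:

	#include <stddef.h>

	extern void *acquire_a(size_t size);
	extern int acquire_b(void *a, size_t size);
	extern void release_a(void *a, size_t size);

	int setup_with_fallback(size_t large, size_t small)
	{
		size_t size = large;	/* optimistic first attempt */

		do {
			void *a;
			int b;

			a = acquire_a(size);
			if (!a)
				goto skip;

			b = acquire_b(a, size);
			if (b < 0)
				goto free_a;

			/* success: real code stashes a/b in caller state */
			return 0;

	free_a:
			release_a(a, size);
	skip:
			if (size == small)
				return -1;	/* smallest size also failed */

			size = small;	/* retry once at the fallback size */
		} while (1);
	}

The design choice in the patch mirrors this: the first pass is
optimistic (__GFP_NOWARN suppresses the allocation-failure warning for
the 64K attempt), and only if the retry at I915_GTT_PAGE_SIZE_4K also
fails does setup_scratch_page() return -ENOMEM.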