From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
Date: Fri, 3 Nov 2017 16:00:35 +0100
Subject: drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 767601d100a53e653233aebca7c262ce0addfa99
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Memory allocation failure should generally be handled gracefully by
callers. In particular, with transparent hugepage support, attempts
to allocate huge pages can fail under memory pressure, but the callers
fall back to allocating individual pages instead. In that case, there
would be spurious

 [TTM] Unable to get page %u

error messages in dmesg.
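
For illustration, the fallback pattern described above looks roughly
like the sketch below. This is a hypothetical helper, not code taken
from TTM; it assumes CONFIG_TRANSPARENT_HUGEPAGE (for HPAGE_PMD_NR and
HPAGE_PMD_ORDER), and all names are illustrative.

  #include <linux/gfp.h>
  #include <linux/huge_mm.h>
  #include <linux/printk.h>

  static int alloc_with_fallback(struct page **pages, unsigned int npages,
                                 gfp_t gfp_flags)
  {
          unsigned int i;

          /* Opportunistically try one huge page for a full PMD's worth. */
          if (npages == HPAGE_PMD_NR) {
                  struct page *huge = alloc_pages(gfp_flags | __GFP_NOWARN,
                                                  HPAGE_PMD_ORDER);
                  if (huge) {
                          /* Caller must later free this as one unit via
                           * __free_pages(huge, HPAGE_PMD_ORDER). */
                          for (i = 0; i < npages; i++)
                                  pages[i] = huge + i;
                          return 0;
                  }
                  /* Expected under memory pressure; not an error. */
                  pr_debug("huge page allocation failed, falling back\n");
          }

          /* Fall back to allocating individual pages. */
          for (i = 0; i < npages; i++) {
                  pages[i] = alloc_page(gfp_flags);
                  if (!pages[i])
                          return -ENOMEM; /* caller unwinds pages[0..i-1] */
          }
          return 0;
  }

Note that pr_debug() output is compiled out by default, so the
downgraded messages still remain reachable at runtime on kernels built
with CONFIG_DYNAMIC_DEBUG, e.g.:

  echo 'file ttm_page_alloc.c +p' > /sys/kernel/debug/dynamic_debug/control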

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c     |   13 ++++++-------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |   12 ++++++------
 2 files changed, 12 insertions(+), 13 deletions(-)

--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm
 		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 					GFP_KERNEL);
 	if (!pages_to_free) {
-		pr_err("Failed to allocate memory for pool free operation\n");
+		pr_debug("Failed to allocate memory for pool free operation\n");
 		return 0;
 	}
 
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct li
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err("Unable to allocate table for new pages\n");
+		pr_debug("Unable to allocate table for new pages\n");
 		return -ENOMEM;
 	}
 
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct li
 		p = alloc_pages(gfp_flags, order);
 
 		if (!p) {
-			pr_err("Unable to get page %u\n", i);
+			pr_debug("Unable to get page %u\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(st
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			pr_err("Failed to fill pool (%p)\n", pool);
+			pr_debug("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &new_pages, lru) {
 				++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **p
 		while (npages) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
-
-				pr_err("Unable to allocate page\n");
+				pr_debug("Unable to allocate page\n");
 				return -ENOMEM;
 			}
 
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **p
 		/* If there is any pages in the list put them back to
 		 * the pool.
 		 */
-		pr_err("Failed to allocate extra pages for large request\n");
+		pr_debug("Failed to allocate extra pages for large request\n");
 		ttm_put_pages(pages, count, flags, cstate);
 		return r;
 	}
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -463,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(s
 					GFP_KERNEL);
 
 	if (!pages_to_free) {
-		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		pr_debug("%s: Failed to allocate memory for pool free operation\n",
 		       pool->dev_name);
 		return 0;
 	}
@@ -755,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err("%s: Unable to allocate table for new pages\n",
+		pr_debug("%s: Unable to allocate table for new pages\n",
 		       pool->dev_name);
 		return -ENOMEM;
 	}
@@ -768,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(
 	for (i = 0, cpages = 0; i < count; ++i) {
 		dma_p = __ttm_dma_alloc_page(pool);
 		if (!dma_p) {
-			pr_err("%s: Unable to get page %u\n",
-			       pool->dev_name, i);
+			pr_debug("%s: Unable to get page %u\n",
+				 pool->dev_name, i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -855,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked
 			struct dma_page *d_page;
 			unsigned cpages = 0;
 
-			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
-			       pool->dev_name, pool->name, r);
+			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
+				 pool->dev_name, pool->name, r);
 
 			list_for_each_entry(d_page, &d_pages, page_list) {
 				cpages++;