From: Matthew Auld <matthew.auld@intel.com>
Date: Mon, 9 Oct 2017 12:00:24 +0100
Subject: drm/i915: s/sg_mask/sg_page_sizes/
Git-commit: 84e8978e62fea661787a216e7fe9abac8f1e056e
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

It's a little unclear what the sg_mask actually is, so prefer the more
meaningful name of sg_page_sizes.
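
For reference, the value is a bitmask of the segment lengths in the
sg list, mirroring what i915_sg_page_sizes() computes; a minimal
sketch of the idea (illustration only, not code from this patch):

	unsigned int sg_page_sizes = 0;
	struct scatterlist *sg;

	/* OR together every segment length; set bits cover the
	 * page sizes backing the object, e.g. 2M | 4K.
	 */
	for (sg = st->sgl; sg; sg = sg_next(sg))
		sg_page_sizes |= sg->length;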

Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009110024.29114-1-matthew.auld@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/i915_drv.h               |    2 -
 drivers/gpu/drm/i915/i915_gem.c               |   28 +++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c        |    6 ++---
 drivers/gpu/drm/i915/i915_gem_internal.c      |    8 +++----
 drivers/gpu/drm/i915/i915_gem_userptr.c       |    6 ++---
 drivers/gpu/drm/i915/selftests/huge_pages.c   |   18 ++++++++--------
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |    8 +++----
 7 files changed, 38 insertions(+), 38 deletions(-)

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3537,7 +3537,7 @@ i915_gem_object_get_dma_address(struct d
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
-				 unsigned int sg_mask);
+				 unsigned int sg_page_sizes);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2335,7 +2335,7 @@ static int i915_gem_object_get_pages_gtt
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	unsigned int max_segment = i915_sg_segment_size();
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	gfp_t noreclaim;
 	int ret;
 
@@ -2367,7 +2367,7 @@ rebuild_st:
 
 	sg = st->sgl;
 	st->nents = 0;
-	sg_mask = 0;
+	sg_page_sizes = 0;
 	for (i = 0; i < page_count; i++) {
 		const unsigned int shrink[] = {
 			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2421,7 +2421,7 @@ rebuild_st:
 		    sg->length >= max_segment ||
 		    page_to_pfn(page) != last_pfn + 1) {
 			if (i) {
-				sg_mask |= sg->length;
+				sg_page_sizes |= sg->length;
 				sg = sg_next(sg);
 			}
 			st->nents++;
@@ -2435,7 +2435,7 @@ rebuild_st:
 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
 	if (sg) { /* loop terminated early; short sg table */
-		sg_mask |= sg->length;
+		sg_page_sizes |= sg->length;
 		sg_mark_end(sg);
 	}
 
@@ -2466,7 +2466,7 @@ rebuild_st:
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
 
-	__i915_gem_object_set_pages(obj, st, sg_mask);
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
 
@@ -2494,7 +2494,7 @@ err_pages:
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
-				 unsigned int sg_mask)
+				 unsigned int sg_page_sizes)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -2514,16 +2514,16 @@ void __i915_gem_object_set_pages(struct
 		obj->mm.quirked = true;
 	}
 
-	GEM_BUG_ON(!sg_mask);
-	obj->mm.page_sizes.phys = sg_mask;
+	GEM_BUG_ON(!sg_page_sizes);
+	obj->mm.page_sizes.phys = sg_page_sizes;
 
 	/*
-	 * Calculate the supported page-sizes which fit into the given sg_mask.
-	 * This will give us the page-sizes which we may be able to use
-	 * opportunistically when later inserting into the GTT. For example if
-	 * phys=2G, then in theory we should be able to use 1G, 2M, 64K or 4K
-	 * pages, although in practice this will depend on a number of other
-	 * factors.
+	 * Calculate the supported page-sizes which fit into the given
+	 * sg_page_sizes. This will give us the page-sizes which we may be able
+	 * to use opportunistically when later inserting into the GTT. For
+	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
+	 * 64K or 4K pages, although in practice this will depend on a number of
+	 * other factors.
 	 */
 	obj->mm.page_sizes.sg = 0;
 	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -259,16 +259,16 @@ struct dma_buf *i915_gem_prime_export(st
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 
 	pages = dma_buf_map_attachment(obj->base.import_attach,
 				       DMA_BIDIRECTIONAL);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	sg_mask = i915_sg_page_sizes(pages->sgl);
+	sg_page_sizes = i915_sg_page_sizes(pages->sgl);
 
-	__i915_gem_object_set_pages(obj, pages, sg_mask);
+	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
 	return 0;
 }
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -49,7 +49,7 @@ static int i915_gem_object_get_pages_int
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
@@ -88,7 +88,7 @@ create_st:
 
 	sg = st->sgl;
 	st->nents = 0;
-	sg_mask = 0;
+	sg_page_sizes = 0;
 
 	do {
 		int order = min(fls(npages) - 1, max_order);
@@ -106,7 +106,7 @@ create_st:
 		} while (1);
 
 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
-		sg_mask |= PAGE_SIZE << order;
+		sg_page_sizes |= PAGE_SIZE << order;
 		st->nents++;
 
 		npages -= 1 << order;
@@ -135,7 +135,7 @@ create_st:
 	 */
 	obj->mm.madv = I915_MADV_DONTNEED;
 
-	__i915_gem_object_set_pages(obj, st, sg_mask);
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
 
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -405,7 +405,7 @@ __i915_gem_userptr_alloc_pages(struct dr
 {
 	unsigned int max_segment = i915_sg_segment_size();
 	struct sg_table *st;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	int ret;
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -435,9 +435,9 @@ alloc_table:
 		return ERR_PTR(ret);
 	}
 
-	sg_mask = i915_sg_page_sizes(st->sgl);
+	sg_page_sizes = i915_sg_page_sizes(st->sgl);
 
-	__i915_gem_object_set_pages(obj, st, sg_mask);
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return st;
 }
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -68,7 +68,7 @@ static int get_huge_pages(struct drm_i91
 	unsigned int page_mask = obj->mm.page_mask;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	u64 rem;
 
 	st = kmalloc(sizeof(*st), GFP);
@@ -83,7 +83,7 @@ static int get_huge_pages(struct drm_i91
 	rem = obj->base.size;
 	sg = st->sgl;
 	st->nents = 0;
-	sg_mask = 0;
+	sg_page_sizes = 0;
 
 	/*
 	 * Our goal here is simple, we want to greedily fill the object from
@@ -104,7 +104,7 @@ static int get_huge_pages(struct drm_i91
 				goto err;
 
 			sg_set_page(sg, page, page_size, 0);
-			sg_mask |= page_size;
+			sg_page_sizes |= page_size;
 			st->nents++;
 
 			rem -= page_size;
@@ -124,8 +124,8 @@ static int get_huge_pages(struct drm_i91
 
 	obj->mm.madv = I915_MADV_DONTNEED;
 
-	GEM_BUG_ON(sg_mask != obj->mm.page_mask);
-	__i915_gem_object_set_pages(obj, st, sg_mask);
+	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
 
@@ -192,7 +192,7 @@ static int fake_get_huge_pages(struct dr
 	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	u64 rem;
 
 	st = kmalloc(sizeof(*st), GFP);
@@ -208,7 +208,7 @@ static int fake_get_huge_pages(struct dr
 	rem = obj->base.size;
 	sg = st->sgl;
 	st->nents = 0;
-	sg_mask = 0;
+	sg_page_sizes = 0;
 	do {
 		unsigned int page_size = get_largest_page_size(i915, rem);
 		unsigned int len = min(page_size * div_u64(rem, page_size),
@@ -221,7 +221,7 @@ static int fake_get_huge_pages(struct dr
 		sg_dma_len(sg) = len;
 		sg_dma_address(sg) = page_size;
 
-		sg_mask |= len;
+		sg_page_sizes |= len;
 
 		st->nents++;
 
@@ -236,7 +236,7 @@ static int fake_get_huge_pages(struct dr
 
 	obj->mm.madv = I915_MADV_DONTNEED;
 
-	__i915_gem_object_set_pages(obj, st, sg_mask);
+	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
 }
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -45,7 +45,7 @@ static int fake_get_pages(struct drm_i91
 #define PFN_BIAS 0x1000
 	struct sg_table *pages;
 	struct scatterlist *sg;
-	unsigned int sg_mask;
+	unsigned int sg_page_sizes;
 	typeof(obj->base.size) rem;
 
 	pages = kmalloc(sizeof(*pages), GFP);
@@ -58,7 +58,7 @@ static int fake_get_pages(struct drm_i91
 		return -ENOMEM;
 	}
 
-	sg_mask = 0;
+	sg_page_sizes = 0;
 	rem = obj->base.size;
 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -67,7 +67,7 @@ static int fake_get_pages(struct drm_i91
 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
 		sg_dma_len(sg) = len;
-		sg_mask |= len;
+		sg_page_sizes |= len;
 
 		rem -= len;
 	}
@@ -75,7 +75,7 @@ static int fake_get_pages(struct drm_i91
 
 	obj->mm.madv = I915_MADV_DONTNEED;
 
-	__i915_gem_object_set_pages(obj, pages, sg_mask);
+	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
 	return 0;
 #undef GFP