|
Mel Gorman |
aa9527 |
From 3eccd908230ffab0f93333714cce2415505a82fc Mon Sep 17 00:00:00 2001
|
|
Mel Gorman |
aa9527 |
From: Mike Rapoport <rppt@kernel.org>
|
|
Mel Gorman |
aa9527 |
Date: Tue, 7 Sep 2021 19:54:55 -0700
|
|
Mel Gorman |
aa9527 |
Subject: [PATCH] mm: memory_hotplug: cleanup after removal of
|
|
Mel Gorman |
aa9527 |
pfn_valid_within()
|
|
Mel Gorman |
aa9527 |
|
|
Mel Gorman |
aa9527 |
References: bsc#1190208 (MM functional and performance backports)
|
|
Mel Gorman |
aa9527 |
Patch-mainline: v5.15-rc1
|
|
Mel Gorman |
aa9527 |
Git-commit: 673d40c82eb2200da32ae9cc9cac8c584b66b5a9
|
|
Mel Gorman |
aa9527 |
|
|
Mel Gorman |
aa9527 |
When test_pages_in_a_zone() used pfn_valid_within() it had some logic |
|
|
Mel Gorman |
aa9527 |
surrounding pfn_valid_within() checks.
|
|
Mel Gorman |
aa9527 |
|
|
Mel Gorman |
aa9527 |
Since pfn_valid_within() is gone, this logic can be removed.
|
|
Mel Gorman |
aa9527 |
|
|
Mel Gorman |
aa9527 |
Link: https://lkml.kernel.org/r/20210713080035.7464-3-rppt@kernel.org
|
|
Mel Gorman |
aa9527 |
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
|
|
Mel Gorman |
aa9527 |
Acked-by: David Hildenbrand <david@redhat.com>
|
|
Mel Gorman |
aa9527 |
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
Mel Gorman |
aa9527 |
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
|
|
Mel Gorman |
aa9527 |
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
|
Mel Gorman |
aa9527 |
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Mel Gorman |
aa9527 |
Signed-off-by: Mel Gorman <mgorman@suse.de>
|
|
Mel Gorman |
aa9527 |
---
|
|
Mel Gorman |
aa9527 |
mm/memory_hotplug.c | 9 +++------
|
|
Mel Gorman |
aa9527 |
1 file changed, 3 insertions(+), 6 deletions(-)
|
|
Mel Gorman |
aa9527 |
|
|
Mel Gorman |
aa9527 |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
|
|
Mel Gorman |
aa9527 |
index 3bbe50d08760..6ea62efe2a8f 100644
|
|
Mel Gorman |
aa9527 |
--- a/mm/memory_hotplug.c
|
|
Mel Gorman |
aa9527 |
+++ b/mm/memory_hotplug.c
|
|
Mel Gorman |
aa9527 |
@@ -1298,7 +1298,7 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
|
|
Mel Gorman |
aa9527 |
unsigned long pfn, sec_end_pfn;
|
|
Mel Gorman |
aa9527 |
struct zone *zone = NULL;
|
|
Mel Gorman |
aa9527 |
struct page *page;
|
|
Mel Gorman |
aa9527 |
- int i;
|
|
Mel Gorman |
aa9527 |
+
|
|
Mel Gorman |
aa9527 |
for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
|
|
Mel Gorman |
aa9527 |
pfn < end_pfn;
|
|
Mel Gorman |
aa9527 |
pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
|
|
Mel Gorman |
aa9527 |
@@ -1307,13 +1307,10 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
|
|
Mel Gorman |
aa9527 |
continue;
|
|
Mel Gorman |
aa9527 |
for (; pfn < sec_end_pfn && pfn < end_pfn;
|
|
Mel Gorman |
aa9527 |
pfn += MAX_ORDER_NR_PAGES) {
|
|
Mel Gorman |
aa9527 |
- i = 0;
|
|
Mel Gorman |
aa9527 |
- if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
|
|
Mel Gorman |
aa9527 |
- continue;
|
|
Mel Gorman |
aa9527 |
/* Check if we got outside of the zone */
|
|
Mel Gorman |
aa9527 |
- if (zone && !zone_spans_pfn(zone, pfn + i))
|
|
Mel Gorman |
aa9527 |
+ if (zone && !zone_spans_pfn(zone, pfn))
|
|
Mel Gorman |
aa9527 |
return NULL;
|
|
Mel Gorman |
aa9527 |
- page = pfn_to_page(pfn + i);
|
|
Mel Gorman |
aa9527 |
+ page = pfn_to_page(pfn);
|
|
Mel Gorman |
aa9527 |
if (zone && page_zone(page) != zone)
|
|
Mel Gorman |
aa9527 |
return NULL;
|
|
Mel Gorman |
aa9527 |
zone = page_zone(page);
|