From 04f1e657384ebf1f75ef21f74b02cd1eb4be8693 Mon Sep 17 00:00:00 2001
From: Firo Yang <fyang@suse.com>
Date: Mon, 3 Jun 2019 08:09:17 +0200
Subject: [PATCH 1/1] mm: pagecache-limit: Calculate pagecache limit based on
 node state

References: FATE309111, bsc#1136811
Patch-mainline: Never, SUSE specific

Since commit 599d0c954f91 ("mm, vmscan: move LRU lists to node"), the
LRU-related accounting is node-based rather than zone-based. Update the
pagecache-limit code to read the node counters so it stays consistent
with that change.
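
After that commit the file and LRU counters are enum node_stat_item
entries rather than enum zone_stat_item entries, so they have to be
read through the node accessors. A minimal sketch of the distinction
(illustration only, not part of the applied diff; accessor names as
used in the hunks below):

	/* purely zone-based counters keep the zone accessor */
	unsigned long free_pages = global_page_state(NR_FREE_PAGES);

	/* file/LRU counters moved to the node stats */
	unsigned long file_pages = global_node_page_state(NR_FILE_PAGES);

	/* per-node variant, as read in shrink_all_nodes() */
	unsigned long active = node_page_state(pgdat, NR_ACTIVE_FILE);

Passing a node_stat_item to global_page_state() would silently read an
unrelated zone counter, since the two stat arrays are indexed by
different enums.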

Credit to an HPE engineer for reporting and analyzing bsc#1136811.

Signed-off-by: Firo Yang <fyang@suse.com>
---
 mm/page_alloc.c | 12 ++++++------
 mm/vmscan.c     |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20f5463..c4459d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7801,21 +7801,21 @@ unsigned long pagecache_over_limit()
 	 * not seem to be guaranteed. (Maybe this was just an oprofile
 	 * bug?).
 	 * (FIXME: Do we need to subtract NR_FILE_DIRTY here as well?) */
-	unsigned long pgcache_pages = global_page_state(NR_FILE_PAGES)
+	unsigned long pgcache_pages = global_node_page_state(NR_FILE_PAGES)
 				    - max_t(unsigned long,
-					    global_page_state(NR_FILE_MAPPED),
-					    global_page_state(NR_SHMEM));
+					    global_node_page_state(NR_FILE_MAPPED),
+					    global_node_page_state(NR_SHMEM));
 	/* We certainly can't free more than what's on the LRU lists
 	 * minus the dirty ones. (FIXME: pages accounted for in NR_WRITEBACK
 	 * are not on the LRU lists  any more, right?) */
-	unsigned long pgcache_lru_pages = global_page_state(NR_ACTIVE_FILE)
-				        + global_page_state(NR_INACTIVE_FILE);
+	unsigned long pgcache_lru_pages = global_node_page_state(NR_ACTIVE_FILE)
+				        + global_node_page_state(NR_INACTIVE_FILE);
 	unsigned long free_pages = global_page_state(NR_FREE_PAGES);
 	unsigned long swap_pages = total_swap_pages - get_nr_swap_pages();
 	unsigned long limit;
 
 	if (vm_pagecache_ignore_dirty != 0)
-		pgcache_lru_pages -= global_page_state(NR_FILE_DIRTY)
+		pgcache_lru_pages -= global_node_page_state(NR_FILE_DIRTY)
 				     /vm_pagecache_ignore_dirty;
 	/* Paranoia */
 	if (unlikely(pgcache_lru_pages > LONG_MAX))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2059460..2b97725 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3778,7 +3778,7 @@ static int shrink_all_nodes(unsigned long nr_pages, int pass,
 		nr_locked_zones++;
 
 		for_each_evictable_lru(lru) {
-			enum zone_stat_item ls = NR_LRU_BASE + lru;
+			enum node_stat_item ls = NR_LRU_BASE + lru;
 			unsigned long lru_pages = node_page_state(pgdat, ls);
 
 			/* For pass = 0, we don't shrink the active list */
-- 
2.7.0