From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 17:19:44 +0100
Subject: mm/workingset: Do not protect workingset_shadow_nodes with irq off
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: a08096a6716603d9bc90e972782cdaf749a77a32
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension
workingset_shadow_nodes is protected by local_irq_disable(). Some users
use spin_lock_irq().
Replace the irq off/on (local_irq_disable()/local_irq_enable()) pairs with a
local_lock(). Rename workingset_shadow_nodes so that I catch users of it
which will be introduced later.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
include/linux/swap.h | 8 +++++---
mm/filemap.c | 12 +++++++++---
mm/truncate.c | 4 +++-
mm/workingset.c | 31 ++++++++++++++++---------------
4 files changed, 33 insertions(+), 22 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
+#include <linux/locallock.h>
#include <asm/page.h>
struct notifier_block;
@@ -256,12 +257,13 @@ bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
+void __workingset_update_node(struct radix_tree_node *node);
+DECLARE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
/* Returns workingset_update_node() if the mapping has shadow entries. */
-#define workingset_lookup_update(mapping) \
+#define __workingset_lookup_update(mapping) \
({ \
- radix_tree_update_node_t __helper = workingset_update_node; \
+ radix_tree_update_node_t __helper = __workingset_update_node; \
if (dax_mapping(mapping) || shmem_mapping(mapping)) \
__helper = NULL; \
__helper; \
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -134,8 +134,10 @@ static int page_cache_tree_insert(struct
if (shadowp)
*shadowp = p;
}
+ local_lock(shadow_nodes_lock);
__radix_tree_replace(&mapping->page_tree, node, slot, page,
- workingset_lookup_update(mapping));
+ __workingset_lookup_update(mapping));
+ local_unlock(shadow_nodes_lock);
mapping->nrpages++;
return 0;
}
@@ -152,6 +154,7 @@ static void page_cache_tree_delete(struc
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+ local_lock(shadow_nodes_lock);
for (i = 0; i < nr; i++) {
struct radix_tree_node *node;
void **slot;
@@ -163,8 +166,9 @@ static void page_cache_tree_delete(struc
radix_tree_clear_tags(&mapping->page_tree, node, slot);
__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
- workingset_lookup_update(mapping));
+ __workingset_lookup_update(mapping));
}
+ local_unlock(shadow_nodes_lock);
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
@@ -358,9 +362,11 @@ page_cache_tree_delete_batch(struct addr
} else {
tail_pages--;
}
+ local_lock(shadow_nodes_lock);
radix_tree_clear_tags(&mapping->page_tree, iter.node, slot);
__radix_tree_replace(&mapping->page_tree, iter.node, slot, NULL,
- workingset_lookup_update(mapping));
+ __workingset_lookup_update(mapping));
+ local_unlock(shadow_nodes_lock);
total_pages++;
}
mapping->nrpages -= total_pages;
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -40,8 +40,10 @@ static inline void __clear_shadow_entry(
return;
if (*slot != entry)
return;
+ local_lock(shadow_nodes_lock);
__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
- workingset_update_node);
+ __workingset_update_node);
+ local_unlock(shadow_nodes_lock);
mapping->nrexceptional--;
}
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -339,9 +339,10 @@ void workingset_activation(struct page *
* point where they would still be useful.
*/
-static struct list_lru shadow_nodes;
+static struct list_lru __shadow_nodes;
+DEFINE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
-void workingset_update_node(struct radix_tree_node *node)
+void __workingset_update_node(struct radix_tree_node *node)
{
/*
* Track non-empty nodes that contain only shadow entries;
@@ -353,10 +354,10 @@ void workingset_update_node(struct radix
*/
if (node->count && node->count == node->exceptional) {
if (list_empty(&node->private_list))
- list_lru_add(&shadow_nodes, &node->private_list);
+ list_lru_add(&__shadow_nodes, &node->private_list);
} else {
if (!list_empty(&node->private_list))
- list_lru_del(&shadow_nodes, &node->private_list);
+ list_lru_del(&__shadow_nodes, &node->private_list);
}
}
@@ -368,9 +369,9 @@ static unsigned long count_shadow_nodes(
unsigned long cache;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- local_irq_disable();
- nodes = list_lru_shrink_count(&shadow_nodes, sc);
- local_irq_enable();
+ local_lock_irq(shadow_nodes_lock);
+ nodes = list_lru_shrink_count(&__shadow_nodes, sc);
+ local_unlock_irq(shadow_nodes_lock);
/*
* Approximate a reasonable limit for the radix tree nodes
@@ -471,15 +472,15 @@ static enum lru_status shadow_lru_isolat
inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
- workingset_lookup_update(mapping));
+ __workingset_lookup_update(mapping));
out_invalid:
spin_unlock(&mapping->tree_lock);
ret = LRU_REMOVED_RETRY;
out:
- local_irq_enable();
+ local_unlock_irq(shadow_nodes_lock);
cond_resched();
- local_irq_disable();
+ local_lock_irq(shadow_nodes_lock);
spin_lock(lru_lock);
return ret;
}
@@ -490,9 +491,9 @@ static unsigned long scan_shadow_nodes(s
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- local_irq_disable();
- ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
- local_irq_enable();
+ local_lock_irq(shadow_nodes_lock);
+ ret = list_lru_shrink_walk(&__shadow_nodes, sc, shadow_lru_isolate, NULL);
+ local_unlock_irq(shadow_nodes_lock);
return ret;
}
@@ -530,7 +531,7 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
- ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
+ ret = __list_lru_init(&__shadow_nodes, true, &shadow_nodes_key);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
@@ -538,7 +539,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
- list_lru_destroy(&shadow_nodes);
+ list_lru_destroy(&__shadow_nodes);
err:
return ret;
}