Subject: be more aggressive about de-activating mm-s under destruction
From: jbeulich@novell.com
Patch-mainline: obsolete

... by not only handling the current task on the CPU that arch_exit_mmap()
gets executed on, but also forcing remote CPUs still using the mm to do the
same.

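The hunk below implements this with an IPI: every CPU recorded in the mm's
cpumask is asked to drop the dying mm from cr3 itself. A condensed,
hypothetical sketch of that pattern follows (helper names are illustrative
only and not part of the patch; the real remote callback additionally guards
itself with spin_trylock() on the task's alloc_lock, as shown in the diff):

	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/smp.h>
	#include <asm/mmu_context.h>

	/* Hypothetical helper -- runs on each IPI'd CPU. */
	static void drop_mm_on_this_cpu(void *info)
	{
		struct mm_struct *mm = info;
		struct task_struct *tsk = current;

		/* Only act if this CPU is still running on the dying mm. */
		if (tsk->active_mm == mm) {
			tsk->active_mm = &init_mm;
			atomic_inc(&init_mm.mm_count);
			switch_mm(mm, &init_mm, tsk);	/* point cr3 at init_mm's pgd */
			atomic_dec(&mm->mm_count);
		}
	}

	/* Hypothetical caller -- IPI the CPUs that may hold the mm and wait. */
	static void force_remote_deactivation(struct mm_struct *mm)
	{
		preempt_disable();
		smp_call_function_many(mm_cpumask(mm), drop_mm_on_this_cpu, mm, 1);
		preempt_enable();
	}

Once no CPU has the pgd loaded any more, the subsequent page-table teardown
finds no active user, which is why it can proceed without per-update
hypercalls, as the updated comment in the diff notes.
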
--- head-2010-04-15.orig/arch/x86/mm/pgtable-xen.c	2010-04-15 11:47:53.000000000 +0200
+++ head-2010-04-15/arch/x86/mm/pgtable-xen.c	2010-04-15 11:48:29.000000000 +0200
@@ -1,6 +1,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/smp.h>
 #include <xen/features.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -438,27 +439,44 @@ void arch_dup_mmap(struct mm_struct *old
 		mm_pin(mm);
 }
 
-void arch_exit_mmap(struct mm_struct *mm)
+/*
+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much*
+ * faster this way, as no hypercalls are needed for the page table updates.
+ */
+static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
+	__releases(tsk->alloc_lock)
 {
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-
-	/*
-	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-	 */
 	if (tsk->active_mm == mm) {
 		tsk->active_mm = &init_mm;
 		atomic_inc(&init_mm.mm_count);
 
 		switch_mm(mm, &init_mm, tsk);
 
-		atomic_dec(&mm->mm_count);
-		BUG_ON(atomic_read(&mm->mm_count) == 0);
+		if (atomic_dec_and_test(&mm->mm_count))
+			BUG();
 	}
 
 	task_unlock(tsk);
+}
+
+static void _leave_active_mm(void *mm)
+{
+	struct task_struct *tsk = current;
+
+	if (spin_trylock(&tsk->alloc_lock))
+		leave_active_mm(tsk, mm);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	leave_active_mm(tsk, mm);
+
+	preempt_disable();
+	smp_call_function_many(mm_cpumask(mm), _leave_active_mm, mm, 1);
+	preempt_enable();
 
 	if (PagePinned(virt_to_page(mm->pgd))
 	    && atomic_read(&mm->mm_count) == 1