From 46b1ee38b2ba1a9524c8e886ad078bd3ca40de2a Mon Sep 17 00:00:00 2001
From: Ralph Campbell <rcampbell@nvidia.com>
Date: Sun, 1 Nov 2020 17:07:23 -0800
Subject: [PATCH] mm/mremap_pages: fix static key devmap_managed_key updates
Git-commit: 46b1ee38b2ba1a9524c8e886ad078bd3ca40de2a
Patch-mainline: v5.10-rc3
References: bsc#1181787

commit 6f42193fd86e ("memremap: don't use a separate devm action for
devmap_managed_enable_get") changed the static key updates such that we
now call devmap_managed_enable_put() without doing the equivalent
devmap_managed_enable_get().

devmap_managed_enable_get() is only called for MEMORY_DEVICE_PRIVATE and
MEMORY_DEVICE_FS_DAX, but memunmap_pages() gets called for other pgmap
types too.  This results in the warning below when switching a devdax
namespace between system-ram and devdax mode.

   jump label: negative count!
   WARNING: CPU: 52 PID: 1335 at kernel/jump_label.c:235 static_key_slow_try_dec+0x88/0xa0
   Modules linked in:
   ....

   NIP static_key_slow_try_dec+0x88/0xa0
   LR static_key_slow_try_dec+0x84/0xa0
   Call Trace:
     static_key_slow_try_dec+0x84/0xa0
     __static_key_slow_dec_cpuslocked+0x34/0xd0
     static_key_slow_dec+0x54/0xf0
     memunmap_pages+0x36c/0x500
     devm_action_release+0x30/0x50
     release_nodes+0x2f4/0x3e0
     device_release_driver_internal+0x17c/0x280
     bus_remove_device+0x124/0x210
     device_del+0x1d4/0x530
     unregister_dev_dax+0x48/0xe0
     devm_action_release+0x30/0x50
     release_nodes+0x2f4/0x3e0
     device_release_driver_internal+0x17c/0x280
     unbind_store+0x130/0x170
     drv_attr_store+0x40/0x60
     sysfs_kf_write+0x6c/0xb0
     kernfs_fop_write+0x118/0x280
     vfs_write+0xe8/0x2a0
     ksys_write+0x84/0x140
     system_call_exception+0x120/0x270
     system_call_common+0xf0/0x27c
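
The root of the warning is a refcount asymmetry: the get side incremented
devmap_managed_key only for the managed pgmap types, while the put side
decremented it unconditionally, so the key count went negative for e.g. a
MEMORY_DEVICE_GENERIC pgmap.  The user-space sketch below models that
pattern with a plain integer; the enum and function names are illustrative
only, not the kernel's static-key API.

   /*
    * Illustrative user-space model of the imbalance (hypothetical names,
    * not kernel code).
    */
   #include <stdio.h>

   enum pgmap_type { DEVICE_PRIVATE, DEVICE_FS_DAX, DEVICE_GENERIC };

   static int key;                 /* stands in for devmap_managed_key */

   static int is_managed(enum pgmap_type t)
   {
           return t == DEVICE_PRIVATE || t == DEVICE_FS_DAX;
   }

   static void get(enum pgmap_type t)
   {
           if (is_managed(t))      /* only managed types take a reference */
                   key++;
   }

   static void put_old(enum pgmap_type t)
   {
           (void)t;
           key--;                  /* pre-fix: unconditional decrement */
   }

   static void put_new(enum pgmap_type t)
   {
           if (is_managed(t))      /* post-fix: mirrors get() exactly */
                   key--;
   }

   int main(void)
   {
           get(DEVICE_GENERIC);
           put_old(DEVICE_GENERIC);
           printf("old scheme: key = %d (negative count)\n", key);

           key = 0;
           get(DEVICE_GENERIC);
           put_new(DEVICE_GENERIC);
           printf("new scheme: key = %d (balanced)\n", key);
           return 0;
   }

The patch below removes the separate need_devmap_managed bookkeeping and
instead gates both the increment and the decrement on pgmap->type, so the
two sides of the static key update can no longer get out of sync.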

(Coly Li: rebased for the Linux 5.3 based SUSE kernel)

Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Link: https://lkml.kernel.org/r/20201023183222.13186-1-rcampbell@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Coly Li <colyli@suse.de>
---
 mm/memremap.c |   38 ++++++++++++++++----------------------
 1 file changed, 16 insertions(+), 22 deletions(-)

--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -41,27 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align)
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 
-static void devmap_managed_enable_put(void)
+static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 {
-	static_branch_dec(&devmap_managed_key);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
+	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+		static_branch_dec(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-	if (!pgmap->ops || !pgmap->ops->page_free) {
-		WARN(1, "Missing page_free method\n");
-		return -EINVAL;
-	}
-
-	static_branch_inc(&devmap_managed_key);
-	return 0;
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
+	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+		static_branch_inc(&devmap_managed_key);
 }
 #else
-static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-	return -EINVAL;
 }
-static void devmap_managed_enable_put(void)
+static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 {
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
@@ -166,7 +163,7 @@ void memunmap_pages(struct dev_pagemap *
 		pageunmap_range(pgmap, i);
 
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
-	devmap_managed_enable_put();
+	devmap_managed_enable_put(pgmap);
 }
 EXPORT_SYMBOL_GPL(memunmap_pages);
 
@@ -303,7 +300,6 @@ void *memremap_pages(struct dev_pagemap
 		.pgprot = PAGE_KERNEL,
 	};
 	const int nr_range = pgmap->nr_range;
-	bool need_devmap_managed = true;
 	int error, i;
 
 	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
@@ -319,6 +315,10 @@ void *memremap_pages(struct dev_pagemap
 			WARN(1, "Missing migrate_to_ram method\n");
 			return ERR_PTR(-EINVAL);
 		}
+		if (!pgmap->ops->page_free) {
+			WARN(1, "Missing page_free method\n");
+			return ERR_PTR(-EINVAL);
+		}
 		if (!pgmap->owner) {
 			WARN(1, "Missing owner\n");
 			return ERR_PTR(-EINVAL);
@@ -332,11 +332,9 @@ void *memremap_pages(struct dev_pagemap
 		}
 		break;
 	case MEMORY_DEVICE_GENERIC:
-		need_devmap_managed = false;
 		break;
 	case MEMORY_DEVICE_PCI_P2PDMA:
 		params.pgprot = pgprot_noncached(params.pgprot);
-		need_devmap_managed = false;
 		break;
 	default:
 		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -360,11 +358,7 @@ void *memremap_pages(struct dev_pagemap
 		}
 	}
 
-	if (need_devmap_managed) {
-		error = devmap_managed_enable_get(pgmap);
-		if (error)
-			return ERR_PTR(error);
-	}
+	devmap_managed_enable_get(pgmap);
 
 	/*
 	 * Clear the pgmap nr_range as it will be incremented for each