From 34dc45be4563f344d59ba0428416d0d265aa4f4d Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Thu, 25 Feb 2021 17:17:08 -0800
Subject: [PATCH] mm: fix memory_failure() handling of dax-namespace metadata
Git-commit: 34dc45be4563f344d59ba0428416d0d265aa4f4d
Patch-mainline: v5.12-rc1
References: bsc#1185335
Given 'struct dev_pagemap' spans both data pages and metadata pages, be
careful to consult the altmap if present to delineate metadata. In fact
the pfn_first() helper already identifies the first valid data pfn, so
export that helper for other code paths via pgmap_pfn_valid().
Other usage of get_dev_pagemap() are not a concern because those are
operating on known data pfns having been looked up by get_user_pages().
I.e. metadata pfns are never user mapped.
Link: https://lkml.kernel.org/r/161058501758.1840162.4239831989762604527.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 6100e34b2526 ("mm, memory_failure: Teach memory_failure() about dev_pagemap pages")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Jan Kara <jack@suse.cz>
---
include/linux/memremap.h | 6 ++++++
kernel/memremap.c | 9 +++++++++
mm/memory-failure.c | 6 ++++++
3 files changed, 21 insertions(+)
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -135,6 +135,7 @@ void *devm_memremap_pages(struct device
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
@@ -162,6 +163,11 @@ static inline struct dev_pagemap *get_de
return NULL;
}
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ return false;
+}
+
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -269,6 +269,15 @@ static unsigned long pfn_first(struct de
return pfn;
}
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ const struct resource *res = &pgmap->res;
+
+ if (pfn >= PHYS_PFN(res->start) && pfn <= PHYS_PFN(res->end))
+ return pfn >= pfn_first(pgmap);
+ return false;
+}
+
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
const struct resource *res = &pgmap->res;
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1177,6 +1177,12 @@ static int memory_failure_dev_pagemap(un
*/
put_page(page);
+ /* device metadata space is not recoverable */
+ if (!pgmap_pfn_valid(pgmap, pfn)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
/*
* Prevent the inode from being freed while we are interrogating
* the address_space, typically this would be handled by