From 55e56f06ed71d9441f3abd5b1d3c1a870812b3fe Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <willy@infradead.org>
Date: Tue, 27 Nov 2018 13:16:34 -0800
Subject: [PATCH] dax: Don't access a freed inode
Git-commit: 55e56f06ed71d9441f3abd5b1d3c1a870812b3fe
Patch-mainline: v4.20-rc6
References: bsc#1120055

After we drop the i_pages lock, the inode can be freed at any time.
The get_unlocked_entry() code has no choice but to reacquire the lock,
so it can't be used here.  Create a new wait_entry_unlocked() which takes
care not to acquire the lock or dereference the address_space in any way.
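
For reference, the helper that the mainline commit introduces looks roughly
like the sketch below, adapted to this tree's pre-XArray locking (mainline
works on an xa_state rather than mapping->tree_lock).  The backport below
instead folds the same logic into __get_unlocked_mapping_entry(); the sketch
is illustrative only, not part of the patch.  The key property is that after
spin_unlock_irq() the function never touches the mapping again, so it stays
safe even if the inode is freed while we sleep:

	static void wait_entry_unlocked(struct address_space *mapping,
			pgoff_t index, void *entry)
	{
		struct wait_exceptional_entry_queue ewait;
		wait_queue_head_t *wq;

		init_wait(&ewait.wait);
		ewait.wait.func = wake_exceptional_entry_func;

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
				TASK_UNINTERRUPTIBLE);
		/* After this unlock we must not touch *mapping again. */
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);

		/*
		 * Entry lock waits are exclusive.  Wake up the next waiter
		 * since we aren't sure we will acquire the entry lock and
		 * thus get the chance to wake the next waiter on unlock.
		 */
		if (waitqueue_active(wq))
			__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
	}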

Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Jan Kara <jack@suse.cz>

---
 fs/dax.c |   18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

--- a/fs/dax.c
+++ b/fs/dax.c
@@ -195,7 +195,8 @@ static inline void *unlock_slot(struct a
  * put_locked_mapping_entry() when he locked the entry and now wants to
  * unlock it.
  *
- * The function must be called with mapping->tree_lock held.
+ * The function must be called with mapping->tree_lock held. When it returns
+ * ERR_PTR(-EAGAIN), the tree_lock has already been dropped.
  */
 static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
@@ -226,9 +227,18 @@ static void *__get_unlocked_mapping_entr
 		spin_unlock_irq(&mapping->tree_lock);
 		revalidate = wait_fn();
 		finish_wait(wq, &ewait.wait);
-		spin_lock_irq(&mapping->tree_lock);
-		if (revalidate)
+		if (revalidate) {
+			/*
+			 * Entry lock waits are exclusive. Wake up the next
+			 * waiter since we aren't sure we will acquire the
+			 * entry lock and thus get the chance to wake the
+			 * next waiter up on unlock.
+			 */
+			if (waitqueue_active(wq))
+				__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 			return ERR_PTR(-EAGAIN);
+		}
+		spin_lock_irq(&mapping->tree_lock);
 	}
 }
 
@@ -405,8 +415,8 @@ bool dax_lock_mapping_entry(struct page
 			spin_unlock_irq(&mapping->tree_lock);
 			break;
 		} else if (IS_ERR(entry)) {
-			spin_unlock_irq(&mapping->tree_lock);
 			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+			/* tree_lock gets unlocked on error return */
 			continue;
 		}
 		lock_slot(mapping, slot);
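
For context, the caller's view of the new contract: a condensed sketch of
the dax_lock_mapping_entry() loop in this tree.  The pfn/page checks of the
real function are elided, and entry_wait_revalidate is the wait_fn brought
in by the commit named in the Fixes: tag, so treat names as illustrative:

	struct address_space *mapping;
	bool did_lock = false;
	void *entry, **slot;

	rcu_read_lock();
	for (;;) {
		mapping = READ_ONCE(page->mapping);
		if (!dax_mapping(mapping))
			break;

		spin_lock_irq(&mapping->tree_lock);
		if (mapping != page->mapping) {
			spin_unlock_irq(&mapping->tree_lock);
			continue;
		}

		entry = __get_unlocked_mapping_entry(mapping, page->index,
				&slot, entry_wait_revalidate);
		if (!entry) {
			spin_unlock_irq(&mapping->tree_lock);
			break;
		} else if (IS_ERR(entry)) {
			/*
			 * -EAGAIN: tree_lock was already dropped inside the
			 * helper and the inode may be gone; loop to re-read
			 * page->mapping instead of touching the old one.
			 */
			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
			continue;
		}
		lock_slot(mapping, slot);
		did_lock = true;
		spin_unlock_irq(&mapping->tree_lock);
		break;
	}
	rcu_read_unlock();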