From: Jens Axboe <axboe@kernel.dk>
Date: Thu, 16 Feb 2023 08:01:08 -0700
Subject: [PATCH] brd: check for REQ_NOWAIT and set correct page allocation
 mask
References: bsc#1012628
Patch-mainline: 6.2.3
Git-commit: 6ded703c56c21bfb259725d4f1831a5feb563e9b

commit 6ded703c56c21bfb259725d4f1831a5feb563e9b upstream.

If REQ_NOWAIT is set, then do a non-blocking allocation if the operation
is a write and we need to insert a new page. Currently REQ_NOWAIT cannot
be set as the queue isn't marked as supporting nowait, this change is in
preparation for allowing that.

radix_tree_preload() warns on attempting to call it with an allocation
mask that doesn't allow blocking. While that warning could arguably
be removed, we need to handle radix insertion failures anyway as they
are more likely if we cannot block to get memory.

Remove legacy BUG_ON()'s and turn them into proper errors instead, one
for the allocation failure and one for finding a page that doesn't
match the correct index.

Cc: stable@vger.kernel.org # 5.10+
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
 drivers/block/brd.c | 48 ++++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 39cad8b2..740631dc 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -80,26 +80,21 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
 /*
  * Insert a new page for a given sector, if one does not already exist.
  */
-static int brd_insert_page(struct brd_device *brd, sector_t sector)
+static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
 {
 	pgoff_t idx;
 	struct page *page;
-	gfp_t gfp_flags;
+	int ret = 0;
 
 	page = brd_lookup_page(brd, sector);
 	if (page)
 		return 0;
 
-	/*
-	 * Must use NOIO because we don't want to recurse back into the
-	 * block or filesystem layers from page reclaim.
-	 */
-	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
-	page = alloc_page(gfp_flags);
+	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
 	if (!page)
 		return -ENOMEM;
 
-	if (radix_tree_preload(GFP_NOIO)) {
+	if (gfpflags_allow_blocking(gfp) && radix_tree_preload(gfp)) {
 		__free_page(page);
 		return -ENOMEM;
 	}
@@ -110,15 +105,17 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector)
 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
 		__free_page(page);
 		page = radix_tree_lookup(&brd->brd_pages, idx);
-		BUG_ON(!page);
-		BUG_ON(page->index != idx);
+		if (!page)
+			ret = -ENOMEM;
+		else if (page->index != idx)
+			ret = -EIO;
 	} else {
 		brd->brd_nr_pages++;
 	}
 	spin_unlock(&brd->brd_lock);
 
 	radix_tree_preload_end();
-	return 0;
+	return ret;
 }
 
 /*
@@ -167,19 +164,20 @@ static void brd_free_pages(struct brd_device *brd)
 /*
  * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
  */
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
+static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
+			     gfp_t gfp)
 {
 	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
 	size_t copy;
 	int ret;
 
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
-	ret = brd_insert_page(brd, sector);
+	ret = brd_insert_page(brd, sector, gfp);
 	if (ret)
 		return ret;
 	if (copy < n) {
 		sector += copy >> SECTOR_SHIFT;
-		ret = brd_insert_page(brd, sector);
+		ret = brd_insert_page(brd, sector, gfp);
 	}
 	return ret;
 }
@@ -254,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-			unsigned int len, unsigned int off, enum req_op op,
+			unsigned int len, unsigned int off, blk_opf_t opf,
 			sector_t sector)
 {
 	void *mem;
 	int err = 0;
 
-	if (op_is_write(op)) {
-		err = copy_to_brd_setup(brd, sector, len);
+	if (op_is_write(opf)) {
+		/*
+		 * Must use NOIO because we don't want to recurse back into the
+		 * block or filesystem layers from page reclaim.
+		 */
+		gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+
+		err = copy_to_brd_setup(brd, sector, len, gfp);
 		if (err)
 			goto out;
 	}
 
 	mem = kmap_atomic(page);
-	if (!op_is_write(op)) {
+	if (!op_is_write(opf)) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -296,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
 				(len & (SECTOR_SIZE - 1)));
 
 		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
-				  bio_op(bio), sector);
+				  bio->bi_opf, sector);
 		if (err) {
+			if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
+				bio_wouldblock_error(bio);
+				return;
+			}
 			bio_io_error(bio);
 			return;
 		}
-- 
2.35.3