From ffa358dcaae1f2f00926484e712e06daa8953cb4 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 20 Aug 2018 13:24:25 -0600
Subject: [PATCH] blk-wbt: move disable check into get_limit()
Git-commit: ffa358dcaae1f2f00926484e712e06daa8953cb4
Patch-mainline: v4.19-rc1
References: bsc#1135873

Check it in one place, instead of in multiple places.

Tested-by: Anchal Agarwal <anchalag@amazon.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Jan Kara <jack@suse.cz>

---
 block/blk-wbt.c |   23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)
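
A note on why the single check in get_limit() is enough: the submission
side only bumps the inflight counter through an atomic "increment if
below the limit" helper, and no increment can be refused against a limit
of UINT_MAX. A request issued while wbt is disabled is therefore still
counted, and the completion side can decrement and wake waiters exactly
as before. The standalone C program below is a minimal userspace sketch
of that conditional increment using C11 atomics; inc_below() is a
hypothetical stand-in modelled on blk-wbt's atomic_inc_below(), and the
numbers are illustrative only.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Atomically bump *v, but only while it is below the given limit.
 * Models the conditional increment used for wbt inflight accounting.
 */
static bool inc_below(atomic_uint *v, unsigned int below)
{
	unsigned int cur = atomic_load(v);

	for (;;) {
		if (cur >= below)
			return false;	/* limit reached: the caller would sleep */
		/* try cur -> cur + 1; on failure cur is reloaded and we retry */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;
	}
}

int main(void)
{
	atomic_uint inflight = 5;	/* pretend five writes are in flight */

	/* a real limit can refuse the increment and force a wait */
	printf("limit 4:        %s\n", inc_below(&inflight, 4) ? "inc" : "wait");
	/* the disabled case: UINT_MAX never refuses, so accounting stays balanced */
	printf("limit UINT_MAX: %s\n", inc_below(&inflight, UINT_MAX) ? "inc" : "wait");
	return 0;
}

With the disabled check folded into get_limit(), __wbt_wait() no longer
needs its own rwb_enabled() special cases, which is what the two removed
hunks below show.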

--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -481,6 +481,13 @@ static inline unsigned int get_limit(str
 	unsigned int limit;
 
 	/*
+	 * If we got disabled, just return UINT_MAX. This ensures that
+	 * we'll properly inc a new IO, and dec+wakeup at the end.
+	 */
+	if (!rwb_enabled(rwb))
+		return UINT_MAX;
+
+	/*
 	 * At this point we know it's a buffered write. If this is
 	 * kswapd trying to free memory, or REQ_SYNC is set, then
 	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
@@ -513,16 +520,6 @@ static void __wbt_wait(struct rq_wb *rwb
 	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
 	DECLARE_WAITQUEUE(wait, current);
 
-	/*
-	* inc it here even if disabled, since we'll dec it at completion.
-	* this only happens if the task was sleeping in __wbt_wait(),
-	* and someone turned it off at the same time.
-	*/
-	if (!rwb_enabled(rwb)) {
-	       atomic_inc(&rqw->inflight);
-	       return;
-	}
-
 	if (!waitqueue_active(&rqw->wait)
 		&& atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
 		return;
@@ -531,12 +528,6 @@ static void __wbt_wait(struct rq_wb *rwb
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!rwb_enabled(rwb)) {
-		       atomic_inc(&rqw->inflight);
-		       break;
-		}
-
-
 		if (atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
 			break;