From 5165ed40a1f0a3bf03526aad96df736556fbe64f Mon Sep 17 00:00:00 2001
From: Logan Gunthorpe <logang@deltatee.com>
Date: Wed, 27 Jul 2022 15:05:56 -0600
Subject: [PATCH] md/raid5: Refactor raid5_get_active_stripe()
Git-commit: 5165ed40a1f0a3bf03526aad96df736556fbe64f
Patch-mainline: v6.0-rc1
References: jsc#PED-2766

Refactor raid5_get_active_stripe() so that it reads more linearly, in
the order it's typically executed.

init_stripe() is now called as soon as a free stripe is found and the
function exits early, which removes a lot of if (sh) checks and
unindents the following code.

Remove the while loop in favour of the 'goto retry' pattern, which
reduces indentation further, and use a 'goto wait_for_stripe' instead
of an additional indent level, since it is the unusual path and this
makes the code easier to read.

No functional changes are intended. This will make subsequent patches
easier to understand.
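
As an illustration of the shape of this refactoring, here is a minimal
standalone sketch of the same 'goto retry' / 'goto wait' control flow
applied to a made-up item cache. All names here (struct cache,
cache_find(), cache_alloc(), and so on) are hypothetical; only the
control flow mirrors the refactored raid5_get_active_stripe(), with a
userspace condition-variable wait standing in for wait_event_lock_irq():

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct item {
	int key;
};

struct cache {
	pthread_mutex_t lock;	/* protects everything below */
	pthread_cond_t freed;	/* signalled when nfree rises */
	int nfree;		/* free items available */
	bool blocked;		/* allocation temporarily disallowed */
};

/* Stub lookup: pretend the cache never has the item already. */
static struct item *cache_find(struct cache *c, int key)
{
	(void)c;
	(void)key;
	return NULL;
}

/* Stub allocation: hand out a singleton while free slots remain. */
static struct item *cache_alloc(struct cache *c, int key)
{
	static struct item it;

	if (c->nfree == 0)
		return NULL;
	c->nfree--;
	it.key = key;
	return &it;
}

/* Called with c->lock held; pthread_cond_wait() drops it while asleep. */
struct item *cache_get(struct cache *c, int key, bool noblock)
{
	struct item *it;

retry:
	it = cache_find(c, key);	/* common case: already cached */
	if (it)
		goto out;

	if (c->blocked)			/* unusual path: skip allocation */
		goto wait_for_item;

	it = cache_alloc(c, key);
	if (it)
		goto out;

wait_for_item:
	if (noblock)
		goto out;		/* caller refuses to sleep */

	while (c->nfree == 0)		/* wait for an item to be freed */
		pthread_cond_wait(&c->freed, &c->lock);
	goto retry;

out:
	return it;
}

Each step now reads top to bottom in order of likelihood, and the only
backward edge is the explicit 'goto retry'; the patch below applies the
same shape to the real function, with the hash locks and
wait_event_lock_irq() in place of the pthread primitives.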

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Coly Li <colyli@suse.de>
---
 drivers/md/raid5.c | 67 +++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 31 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9270a714cceb..97e8d4baf3fc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -766,41 +766,46 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 
 	spin_lock_irq(conf->hash_locks + hash);
 
-	do {
-		wait_event_lock_irq(conf->wait_for_quiescent,
-				    conf->quiesce == 0 || noquiesce,
-				    *(conf->hash_locks + hash));
-		sh = find_get_stripe(conf, sector, conf->generation - previous,
-				     hash);
-		if (sh)
-			break;
+retry:
+	wait_event_lock_irq(conf->wait_for_quiescent,
+			    conf->quiesce == 0 || noquiesce,
+			    *(conf->hash_locks + hash));
+	sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+	if (sh)
+		goto out;
 
-		if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
-			sh = get_free_stripe(conf, hash);
-			if (!sh && !test_bit(R5_DID_ALLOC, &conf->cache_state))
-				set_bit(R5_ALLOC_MORE, &conf->cache_state);
-		}
-		if (noblock && !sh)
-			break;
+	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+		goto wait_for_stripe;
 
+	sh = get_free_stripe(conf, hash);
+	if (sh) {
 		r5c_check_stripe_cache_usage(conf);
-		if (!sh) {
-			set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-			r5l_wake_reclaim(conf->log, 0);
-			wait_event_lock_irq(conf->wait_for_stripe,
-					!list_empty(conf->inactive_list + hash) &&
-					(atomic_read(&conf->active_stripes)
-					 < (conf->max_nr_stripes * 3 / 4)
-					 || !test_bit(R5_INACTIVE_BLOCKED,
-						      &conf->cache_state)),
-					*(conf->hash_locks + hash));
-			clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-		} else {
-			init_stripe(sh, sector, previous);
-			atomic_inc(&sh->count);
-		}
-	} while (sh == NULL);
+		init_stripe(sh, sector, previous);
+		atomic_inc(&sh->count);
+		goto out;
+	}
 
+	if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+		set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+	if (noblock)
+		goto out;
+
+	r5c_check_stripe_cache_usage(conf);
+	set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+	r5l_wake_reclaim(conf->log, 0);
+	wait_event_lock_irq(conf->wait_for_stripe,
+			    !list_empty(conf->inactive_list + hash) &&
+			    (atomic_read(&conf->active_stripes)
+				  < (conf->max_nr_stripes * 3 / 4)
+				 || !test_bit(R5_INACTIVE_BLOCKED,
+					      &conf->cache_state)),
+			    *(conf->hash_locks + hash));
+	clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+	goto retry;
+
+out:
 	spin_unlock_irq(conf->hash_locks + hash);
 	return sh;
 }
-- 
2.35.3