From: Christoph Hellwig <hch@lst.de>
Date: Tue, 12 Oct 2021 12:40:44 +0200
Subject: [PATCH] blk-mq: cleanup and rename __blk_mq_alloc_request
Git-commit: b90cfaed3789ecdc5580027fc91e3056bc6b3216
Patch-mainline: v5.16-rc1
References: jsc#PED-1183

The newly added loop for the cached requests in __blk_mq_alloc_request
is a little too convoluted for my taste, so unwind it a bit.  Also
rename the function to __blk_mq_alloc_requests now that it can allocate
more than a single request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Hannes Reinecke <hare@suse.com>
---
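For context, here is a minimal userspace sketch of the control-flow shape
this patch gives the function: the failure case (no tag) is tested first and
handled with a bail-out or retry, so the success path reads straight down and
surplus requests are pushed onto a singly linked cached list.  Every name in
the sketch (get_tag(), struct req, alloc_reqs()) is a hypothetical stand-in,
not the blk-mq API, and the elevator/shared-tags branch that disables caching
is elided.

#include <stdio.h>
#include <stdlib.h>

#define NO_TAG (-1)

struct req {
	int tag;
	struct req *next;
};

/* Stand-in for blk_mq_get_tag(); fails roughly one time in four. */
static int get_tag(void)
{
	return rand() % 4 ? rand() % 256 : NO_TAG;
}

static struct req *alloc_reqs(int nr, int nowait, struct req **cached)
{
	struct req *rq;
	int tag;

	do {
		tag = get_tag();
		if (tag == NO_TAG) {
			/* Failure handled first: bail out or retry. */
			if (nowait)
				break;
			continue;	/* the kernel sleeps briefly here */
		}

		rq = malloc(sizeof(*rq));
		if (!rq)
			break;
		rq->tag = tag;
		rq->next = NULL;
		if (!--nr)
			return rq;	/* last request goes to the caller */

		/* link into the cached list */
		rq->next = *cached;
		*cached = rq;
		nowait = 1;		/* later failures must not block */
	} while (1);

	/* No tag (or no memory): fall back to a cached request, if any. */
	if (!*cached)
		return NULL;
	rq = *cached;
	*cached = rq->next;
	return rq;
}

int main(void)
{
	struct req *cached = NULL;
	struct req *rq = alloc_reqs(4, 0, &cached);

	if (rq)
		printf("got tag %d, cached extras: %s\n",
		       rq->tag, cached ? "yes" : "no");
	free(rq);
	while (cached) {
		rq = cached;
		cached = cached->next;
		free(rq);
	}
	return 0;
}

The real __blk_mq_alloc_requests() additionally returns the request at once,
without caching, when an I/O scheduler is attached or
BLK_MQ_F_TAG_QUEUE_SHARED is set, and it sleeps (msleep(3)) before retrying
so that realtime threads are migrated off a hctx that is going away.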
 block/blk-mq.c | 56 +++++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d9f14d3c2b8c..98a5d0850b95 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
@@ -395,36 +395,36 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	 */
 	do {
 		tag = blk_mq_get_tag(data);
-		if (tag != BLK_MQ_NO_TAG) {
-			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-			if (!--data->nr_tags)
-				return rq;
-			if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-				return rq;
-			rq->rq_next = *data->cached_rq;
-			*data->cached_rq = rq;
-			data->flags |= BLK_MQ_REQ_NOWAIT;
-			continue;
+		if (tag == BLK_MQ_NO_TAG) {
+			if (data->flags & BLK_MQ_REQ_NOWAIT)
+				break;
+			/*
+			 * Give up the CPU and sleep for a random short time to
+			 * ensure that thread using a realtime scheduling class
+			 * are migrated off the CPU, and thus off the hctx that
+			 * is going away.
+			 */
+			msleep(3);
+			goto retry;
 		}
-		if (data->flags & BLK_MQ_REQ_NOWAIT)
-			break;
 
-		/*
-		 * Give up the CPU and sleep for a random short time to ensure
-		 * that thread using a realtime scheduling class are migrated
-		 * off the CPU, and thus off the hctx that is going away.
-		 */
-		msleep(3);
-		goto retry;
+		rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+		if (!--data->nr_tags || e ||
+		    (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+			return rq;
+
+		/* link into the cached list */
+		rq->rq_next = *data->cached_rq;
+		*data->cached_rq = rq;
+		data->flags |= BLK_MQ_REQ_NOWAIT;
 	} while (1);
 
-	if (data->cached_rq) {
-		rq = *data->cached_rq;
-		*data->cached_rq = rq->rq_next;
-		return rq;
-	}
+	if (!data->cached_rq)
+		return NULL;
 
-	return NULL;
+	rq = *data->cached_rq;
+	*data->cached_rq = rq->rq_next;
+	return rq;
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = __blk_mq_alloc_request(&data);
+	rq = __blk_mq_alloc_requests(&data);
 	if (!rq)
 		goto out_queue_exit;
 	rq->__data_len = 0;
@@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 			plug->nr_ios = 1;
 			data.cached_rq = &plug->cached_rq;
 		}
-		rq = __blk_mq_alloc_request(&data);
+		rq = __blk_mq_alloc_requests(&data);
 		if (unlikely(!rq)) {
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)
-- 
2.35.3