From: Jens Axboe <axboe@kernel.dk>
Date: Fri, 18 Jan 2019 10:34:16 -0700
Subject: bfq: update internal depth state when queue depth changes
Git-commit: 77f1e0a52d26242b6c2dba019f6ebebfb9ff701e
Patch-mainline: v5.1-rc6
References: bsc#1172455

A previous commit moved the shallow depth and BFQ depth map calculations
to init time, taking them out of the hotter IO path. This potentially
causes hangs if the user changes the depth of the scheduler map by
writing to the 'nr_requests' sysfs file for that device.
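
For reference, a minimal userspace sketch of how such a depth change is
triggered; the device path and the value written are illustrative
assumptions. The store handler for that sysfs file ends up in
blk_mq_update_nr_requests(), which the blk-mq.c hunk below extends:

    /* Reproducer sketch: resize the scheduler tag depth from userspace. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/queue/nr_requests", "w");

            if (!f)
                    return 1;
            fprintf(f, "32\n");     /* any value that resizes sched_tags */
            return fclose(f) ? 1 : 0;
    }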

Add a blk-mq-sched hook that allows blk-mq to inform the scheduler when
the queue depth changes, so that the scheduler can update its internal
state.
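
From the scheduler's side the contract is small; a rough sketch, with a
hypothetical scheduler name and an empty callback body:

    /* blk-mq invokes the callback after it has resized the hctx's
     * sched_tags, once per hardware queue.
     */
    static void my_sched_depth_updated(struct blk_mq_hw_ctx *hctx)
    {
            /* recompute any depth-derived state cached at init time */
    }

    static struct elevator_type my_sched_elv = {
            .ops.mq = {
                    .depth_updated  = my_sched_depth_updated,
            },
            /* ... */
    };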

[lduncan: modified to apply correctly, since our "ops" structure
 is still a union, unlike upstream.]
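
For context, the functional delta from the mainline commit is only the
path used to reach the new callback; roughly:

    /* this kernel ("ops" is still a union, so go through .mq): */
    q->elevator->type->ops.mq.depth_updated(hctx);

    /* upstream v5.1 (the union is gone): */
    q->elevator->type->ops.depth_updated(hctx);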

Tested-by: Kai Krakow <kai@kaishome.de>
Reported-by: Paolo Valente <paolo.valente@linaro.org>
Fixes: f0635b8a416e ("bfq: calculate shallow depths at init time")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Lee Duncan <lduncan@suse.com>
---
 block/bfq-iosched.c      |    8 +++++++-
 block/blk-mq.c           |    2 ++
 include/linux/elevator.h |    1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5342,7 +5342,7 @@ static unsigned int bfq_update_depths(st
 	return min_shallow;
 }
 
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5350,6 +5350,11 @@ static int bfq_init_hctx(struct blk_mq_h
 
 	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+	bfq_depth_updated(hctx);
 	return 0;
 }
 
@@ -5772,6 +5777,7 @@ static struct elevator_type iosched_bfq_
 		.requests_merged	= bfq_requests_merged,
 		.request_merged		= bfq_request_merged,
 		.has_work		= bfq_has_work,
+		.depth_updated		= bfq_depth_updated,
 		.init_hctx		= bfq_init_hctx,
 		.init_sched		= bfq_init_queue,
 		.exit_sched		= bfq_exit_queue,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2929,6 +2929,8 @@ int blk_mq_update_nr_requests(struct req
 		}
 		if (ret)
 			break;
+		if (q->elevator && q->elevator->type->ops.mq.depth_updated)
+			q->elevator->type->ops.mq.depth_updated(hctx);
 	}
 
 	if (!ret)
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -98,6 +98,7 @@ struct elevator_mq_ops {
 	void (*exit_sched)(struct elevator_queue *);
 	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+	void (*depth_updated)(struct blk_mq_hw_ctx *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
 	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);