From: Ming Lei <ming.lei@redhat.com>
Date: Wed, 10 Jan 2018 10:51:29 +0800
Subject: [PATCH] Revert "block: blk-merge: try to make front segments in full
 size"
Git-commit: b4b6cb613519b7449da510bccf08986371b328cb
Patch-mainline: v4.16-rc1
References: bsc#1104967,FATE#325924

This reverts commit a2d37968d784363842f87820a21e106741d28004.

If max segment size isn't 512-aligned, this patch won't work well.

Also once multipage bvec is enabled, adjacent bvecs won't be physically
contiguous if page is added via bio_add_page(), so we don't need this
kind of complicated logic.

Reported-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Hannes Reinecke <hare@suse.com>
---
 block/blk-merge.c | 54 +++++-------------------------------------------------
 1 file changed, 5 insertions(+), 49 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 446f63e076aa..8452fc7164cc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -109,7 +109,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
-	unsigned advance = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
@@ -133,32 +132,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		}
 
 		if (bvprvp && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
-			if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
-				/*
-				 * One assumption is that initial value of
-				 * @seg_size(equals to bv.bv_len) won't be
-				 * bigger than max segment size, but this
-				 * becomes false after multipage bvecs.
-				 */
-				advance = queue_max_segment_size(q) - seg_size;
-
-				if (advance > 0) {
-					seg_size += advance;
-					sectors += advance >> 9;
-					bv.bv_len -= advance;
-					bv.bv_offset += advance;
-				}
-
-				/*
-				 * Still need to put remainder of current
-				 * bvec into a new segment.
-				 */
-				goto new_segment;
-			}
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
@@ -180,12 +159,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
 
-		/* restore the bvec for iterator */
-		if (advance) {
-			bv.bv_len += advance;
-			bv.bv_offset -= advance;
-			advance = 0;
-		}
 	}
 
 	do_split = false;
@@ -386,29 +359,16 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 {
 
 	int nbytes = bvec->bv_len;
-	unsigned advance = 0;
 
 	if (*sg && *cluster) {
+		if ((*sg)->length + nbytes > queue_max_segment_size(q))
+			goto new_segment;
+
 		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
 		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
-		/*
-		 * try best to merge part of the bvec into previous
-		 * segment and follow same policy with
-		 * blk_bio_segment_split()
-		 */
-		if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
-			advance = queue_max_segment_size(q) - (*sg)->length;
-			if (advance) {
-				(*sg)->length += advance;
-				bvec->bv_offset += advance;
-				bvec->bv_len -= advance;
-			}
-			goto new_segment;
-		}
-
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
@@ -431,10 +391,6 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
-
-		/* for making iterator happy */
-		bvec->bv_offset -= advance;
-		bvec->bv_len += advance;
 	}
 	*bvprv = *bvec;
 }
-- 
2.16.4