From: Dmitry Monakhov <dmonakhov@gmail.com>
Date: Fri, 1 Nov 2019 13:11:10 +0000
Subject: [PATCH] block,bfq: Skip tracing hooks if possible
References: bsc#1175995,jsc#SLE-15608
Git-commit: 40d47c155e8ae9bcb3f2d0d01cf14d903c664726
Patch-mainline: v5.5-rc1

In most cases blk tracing is not active, but the bfq_log_bfqq macro
generates pid_str unconditionally, which results in significant overhead.
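
As a rough illustration of the early-bailout pattern the hunks below
apply, here is a minimal, self-contained sketch; trace_enabled() and
log_msg() are hypothetical stand-ins for
blk_trace_note_message_enabled() and the bfq_log_bfqq() machinery:

/* Standalone sketch: a do { ... } while (0) logging macro that breaks
 * out before paying the snprintf() cost when tracing is disabled.
 */
#include <stdbool.h>
#include <stdio.h>

static bool trace_enabled(void)
{
	return false;			/* the common case: tracing off */
}

#define log_msg(pid, fmt, args...) do {				\
	char pid_str[16];					\
	if (!trace_enabled())					\
		break;		/* skip the formatting work */	\
	snprintf(pid_str, sizeof(pid_str), "%d", (pid));	\
	printf("bfq%s " fmt "\n", pid_str, ##args);		\
} while (0)

int main(void)
{
	/* With tracing off, this costs only the trace_enabled() check. */
	log_msg(1234, "dispatched %d requests", 128);
	return 0;
}

The break simply exits the do { ... } while (0) block, so when tracing
is off the only cost left on the hot path is the enabled check itself.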

## Test
modprobe null_blk
echo bfq > /sys/block/nullb0/queue/scheduler
fio --name=t --ioengine=libaio --direct=1 --filename=/dev/nullb0 \
   --runtime=30 --time_based=1 --rw=write --iodepth=128 --bs=4k

## Results
| metric | baseline | w/ patch | gain |
| iops   | 113.19K  | 126.42K  | +11% |

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Dmitry Monakhov <dmonakhov@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 block/bfq-iosched.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 63231d17c6cc..bbf5e89a8c27 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1070,6 +1070,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
 	char pid_str[MAX_PID_STR_LENGTH];	\
+	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
+		break;							\
 	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
 	blk_add_cgroup_trace_msg((bfqd)->queue,				\
 			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
@@ -1086,6 +1088,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {	\
 	char pid_str[MAX_PID_STR_LENGTH];	\
+	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
+		break;							\
 	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
 	blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str,	\
 			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
-- 
2.16.4