From 40d47c155e8ae9bcb3f2d0d01cf14d903c664726 Mon Sep 17 00:00:00 2001
From: Dmitry Monakhov
Date: Fri, 1 Nov 2019 13:11:10 +0000
Subject: block,bfq: Skip tracing hooks if possible

In most cases blktrace is not active, but the bfq_log_bfqq macro
generates pid_str unconditionally, which results in significant
overhead.

## Test
modprobe null_blk
echo bfq > /sys/block/nullb0/queue/scheduler
fio --name=t --ioengine=libaio --direct=1 --filename=/dev/nullb0 \
    --runtime=30 --time_based=1 --rw=write --iodepth=128 --bs=4k

# Results
|      | baseline | w/ patch | gain |
| iops | 113.19K  | 126.42K  | +11% |

Acked-by: Paolo Valente
Signed-off-by: Dmitry Monakhov
Signed-off-by: Jens Axboe
---
 block/bfq-iosched.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 9c82c1f35716..8526f20c53bc 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1068,6 +1068,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {			\
 	char pid_str[MAX_PID_STR_LENGTH];				\
+	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
+		break;							\
 	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
 	blk_add_cgroup_trace_msg((bfqd)->queue,				\
 			bfqg_to_blkg(bfqq_group(bfqq))->blkcg,		\
@@ -1084,6 +1086,8 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {			\
 	char pid_str[MAX_PID_STR_LENGTH];				\
+	if (likely(!blk_trace_note_message_enabled((bfqd)->queue)))	\
+		break;							\
 	bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH);	\
 	blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str,	\
 			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
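
For readers unfamiliar with the trick, here is a minimal userspace sketch of
the pattern the patch relies on: a do { ... } while (0) statement macro whose
body bails out with "break" when a cheap "enabled" check fails, so the
expensive string formatting is skipped entirely. The names trace_enabled,
log_msg and the snprintf body below are hypothetical stand-ins for
blk_trace_note_message_enabled(), bfq_log_bfqq() and bfq_pid_to_str(); they
are not part of the patch itself.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for blk_trace_note_message_enabled(): a cheap runtime flag. */
static bool trace_enabled;

/*
 * Stand-in for bfq_log_bfqq().  The do { ... } while (0) wrapper makes the
 * macro behave as a single statement at the call site, and the early
 * "break" leaves that do/while before any formatting work is done when
 * tracing is off.
 */
#define log_msg(fmt, args...) do {					\
	char pid_str[16];						\
	if (!trace_enabled)	/* cheap check first */			\
		break;		/* skips the snprintf below */		\
	snprintf(pid_str, sizeof(pid_str), "%d", 1234);			\
	printf("bfq%s " fmt "\n", pid_str, ##args);			\
} while (0)

int main(void)
{
	log_msg("dropped, tracing off");	/* no formatting happens */
	trace_enabled = true;
	log_msg("emitted, tracing on");		/* message is built now */
	return 0;
}

With tracing off, the only per-call cost in this sketch is the flag test and
a taken branch, which mirrors why the patch recovers the iops shown above.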