From 7d121d701d58a92f26decb10da1d04a88b74519d Mon Sep 17 00:00:00 2001
From: Breno Leitao
Date: Tue, 6 Jan 2026 06:26:57 -0800
Subject: [PATCH] blk-rq-qos: Remove unlikely() hints from QoS checks

The unlikely() annotations on the QUEUE_FLAG_QOS_ENABLED checks are
counterproductive. Writeback throttling (WBT) is enabled by default on
most configurations, since CONFIG_BLK_WBT_MQ defaults to 'y', so the
flag is set on most request queues. Branch profiling on Meta servers,
which have WBT enabled, shows these checks being mispredicted ~100% of
the time.

Remove the unlikely() annotations to let the CPU's branch predictor
learn the actual behavior, potentially improving I/O path performance.

Signed-off-by: Breno Leitao
Signed-off-by: Jens Axboe
---
 block/blk-rq-qos.h | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index b538f2c0febc..a747a504fe42 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos && !blk_rq_is_passthrough(rq))
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+	    q->rq_qos && !blk_rq_is_passthrough(rq))
 		__rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_requeue(q->rq_qos, rq);
 }
 
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_THROTTLED);
 		__rq_qos_throttle(q->rq_qos, bio);
 	}
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 				struct bio *bio)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos) {
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
 		bio_set_flag(bio, BIO_QOS_MERGED);
 		__rq_qos_merge(q->rq_qos, rq, bio);
 	}
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-	    q->rq_qos)
+	if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
 		__rq_qos_queue_depth_changed(q->rq_qos);
 }
 
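
A note for readers unfamiliar with the hint being removed: unlikely()
is the kernel's wrapper around the compiler builtin __builtin_expect(),
which biases code layout so the annotated path is placed out of line.
The snippet below is a minimal, self-contained userspace sketch of the
before/after pattern in this patch, not kernel code: struct queue,
qos_enabled, do_throttle(), throttle_hinted() and throttle_unhinted()
are hypothetical stand-ins for the queue flag test and
__rq_qos_throttle().

	#include <stdbool.h>
	#include <stdio.h>

	/* Classic kernel-style hint macros built on __builtin_expect(). */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	struct queue {
		bool qos_enabled;	/* stand-in for QUEUE_FLAG_QOS_ENABLED */
	};

	static void do_throttle(struct queue *q)
	{
		(void)q;		/* stand-in for __rq_qos_throttle() */
	}

	/*
	 * Old pattern: the hint tells the compiler the QoS path is cold,
	 * so it gets laid out out of line. That is the wrong choice when
	 * QoS is enabled on nearly every queue (e.g. WBT on by default).
	 */
	static void throttle_hinted(struct queue *q)
	{
		if (unlikely(q->qos_enabled))
			do_throttle(q);
	}

	/*
	 * New pattern: no static hint. Code layout stays neutral and the
	 * CPU's dynamic branch predictor learns the real, mostly-taken
	 * behavior on its own.
	 */
	static void throttle_unhinted(struct queue *q)
	{
		if (q->qos_enabled)
			do_throttle(q);
	}

	int main(void)
	{
		struct queue q = { .qos_enabled = true };

		throttle_hinted(&q);
		throttle_unhinted(&q);
		printf("qos_enabled=%d\n", q.qos_enabled);
		return 0;
	}

Comparing the two with gcc -O2 -S typically shows the hinted version
placing the do_throttle() call behind a forward branch; when the
condition is true almost always, that placement costs rather than
saves, which is the effect the branch profiling above measured.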