
block-6.19-20260109

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmlhS3UQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpqN1EACZI7gCMHL+CI5utvaQVPoZbyDf3jED73KO
 NwDyLKl/frGW2njbM/hcSSH0SITGYnrN0+KGr9JFIIu/AMnl+0prl74DrPjUsQ3x
 b9FwHYcjgQxPEIR39KxqSGAJTrxNxGFyS0OaTg91OMKg8Ze57WlkDRtRIJBpsTB4
 I2OUrMC34fVvjSTzefErB/eNsY3xAO8aFpWbBGD2h/GpH0f3SgGTAu7JH6Hj1Zfw
 kFWyMMSc/JkGB7wSOLxDB2IepS7PkLwlRaU6rHV3xzI1DXs24oUT8E20VU8JMedf
 WLQpzNSfqKws6KQa9LIywMo/bwA4dh3FogUJ6MflJKZoGCiMQnps4f18L6EI+w9L
 NpDCWkNgNwd6siDbTBZebd8YlqkWJYJ7NPwTl9dBdczX4DWsfej0exC2UPgN3B7R
 MQNKuP/+oC7y92igMAXIgFRQIwriVNFCsW/Q3oZSDTJSmaDc7CvONNaLnRom0sen
 1uPt/8w7bz8PkUlVUt6SFl0+KaCXX3mFUnEDiY7+du7nSUeyo1BEL6tm46q7gybC
 lRjyDWp5mz/a/JL3tmiOtavVbnyZ1iy03Nd5HfULUhsARJAQKbE+hAvBEhZGq2F2
 A4FKJgzRd7u5dBcaGLNf8H6UVml600ZX9GPkjH35tVXkqB6z87mQTfJmT6ViLKLU
 vM8AfGWbLQ==
 =DaMl
 -----END PGP SIGNATURE-----

Merge tag 'block-6.19-20260109' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Kill unlikely checks for blk-rq-qos. These checks are really
   all-or-nothing: either the branch is taken all the time, or it is
   never taken. Depending on the configuration, either case may be
   true. Just remove the annotation

 - Fix for merging bios with different app tags set

 - Fix for a recently introduced slowdown due to RCU synchronization

 - Fix for a status change on loop while it's in use, and then a later
   fix for that fix

 - Fix for the async partition scanning in ublk

* tag 'block-6.19-20260109' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  ublk: fix use-after-free in ublk_partition_scan_work
  blk-mq: avoid stall during boot due to synchronize_rcu_expedited
  loop: add missing bd_abort_claiming in loop_set_status
  block: don't merge bios with different app_tags
  blk-rq-qos: Remove unlikely() hints from QoS checks
  loop: don't change loop device under exclusive opener in loop_set_status
Committed by Linus Torvalds, 2026-01-09 15:42:46 -10:00, as commit cb2076b091.
5 changed files with 83 additions and 50 deletions.

block/blk-integrity.c

@@ -140,14 +140,21 @@ EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
                             struct request *next)
 {
+        struct bio_integrity_payload *bip, *bip_next;
+
         if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
                 return true;
 
         if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
                 return false;
 
-        if (bio_integrity(req->bio)->bip_flags !=
-            bio_integrity(next->bio)->bip_flags)
+        bip = bio_integrity(req->bio);
+        bip_next = bio_integrity(next->bio);
+
+        if (bip->bip_flags != bip_next->bip_flags)
                 return false;
 
+        if (bip->bip_flags & BIP_CHECK_APPTAG &&
+            bip->app_tag != bip_next->app_tag)
+                return false;
+
         if (req->nr_integrity_segments + next->nr_integrity_segments >
@@ -163,15 +170,21 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
                              struct bio *bio)
 {
+        struct bio_integrity_payload *bip, *bip_bio = bio_integrity(bio);
         int nr_integrity_segs;
 
-        if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
+        if (blk_integrity_rq(req) == 0 && bip_bio == NULL)
                 return true;
 
-        if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
+        if (blk_integrity_rq(req) == 0 || bip_bio == NULL)
                 return false;
 
-        if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
+        bip = bio_integrity(req->bio);
+        if (bip->bip_flags != bip_bio->bip_flags)
+                return false;
+
+        if (bip->bip_flags & BIP_CHECK_APPTAG &&
+            bip->app_tag != bip_bio->app_tag)
                 return false;
 
         nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
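
The app_tag comparison matters because a merged request carries a single
application tag for its protection information, so two bios that both set
BIP_CHECK_APPTAG but expect different tags must not merge. A minimal sketch
of the case the new check rejects (hypothetical bios and values, not code
from this commit):

        /* Two bios that both verify the PI application tag, but with
         * different expected values (bio_a and bio_b are hypothetical): */
        struct bio_integrity_payload *a = bio_integrity(bio_a);
        struct bio_integrity_payload *b = bio_integrity(bio_b);

        a->bip_flags |= BIP_CHECK_APPTAG;
        a->app_tag = 0x1234;
        b->bip_flags |= BIP_CHECK_APPTAG;
        b->app_tag = 0x5678;

        /*
         * Before this fix only bip_flags were compared, and they match
         * here, so the bios could merge and the device would verify one
         * app_tag against ranges that expect two different ones.
         */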

block/blk-mq.c

@@ -4553,8 +4553,7 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                 * Make sure reading the old queue_hw_ctx from other
                 * context concurrently won't trigger uaf.
                 */
-               synchronize_rcu_expedited();
-               kfree(hctxs);
+               kfree_rcu_mightsleep(hctxs);
                hctxs = new_hctxs;
        }
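
For context, kfree_rcu_mightsleep() hands the pointer to RCU and frees it
after a grace period, instead of making the caller block in
synchronize_rcu_expedited() on every reallocation, which is what stalled
boot. A minimal sketch of the pattern, assuming a hypothetical
RCU-protected array (not the blk-mq code itself):

        #include <linux/mutex.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct item;                            /* hypothetical payload */
        static struct item __rcu **table;       /* hypothetical RCU-protected array */
        static DEFINE_MUTEX(table_lock);

        static void table_replace(struct item __rcu **new_tbl)
        {
                struct item __rcu **old;

                mutex_lock(&table_lock);
                old = rcu_dereference_protected(table,
                                lockdep_is_held(&table_lock));
                rcu_assign_pointer(table, new_tbl);
                mutex_unlock(&table_lock);

                /*
                 * synchronize_rcu_expedited(); kfree(old); would block
                 * here for a full grace period on every replacement.
                 * Deferring the free lets the caller continue; RCU frees
                 * "old" once all readers that might still see it are done.
                 */
                kfree_rcu_mightsleep(old);
        }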

block/blk-rq-qos.h

@@ -112,29 +112,26 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos)
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                 __rq_qos_cleanup(q->rq_qos, bio);
 }
 
 static inline void rq_qos_done(struct request_queue *q, struct request *rq)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos && !blk_rq_is_passthrough(rq))
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
+            q->rq_qos && !blk_rq_is_passthrough(rq))
                 __rq_qos_done(q->rq_qos, rq);
 }
 
 static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos)
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                 __rq_qos_issue(q->rq_qos, rq);
 }
 
 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos)
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                 __rq_qos_requeue(q->rq_qos, rq);
 }
@@ -162,8 +159,7 @@ static inline void rq_qos_done_bio(struct bio *bio)
 static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos) {
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
                 bio_set_flag(bio, BIO_QOS_THROTTLED);
                 __rq_qos_throttle(q->rq_qos, bio);
         }
@@ -172,16 +168,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 static inline void rq_qos_track(struct request_queue *q, struct request *rq,
                                 struct bio *bio)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos)
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                 __rq_qos_track(q->rq_qos, rq, bio);
 }
 
 static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
                                 struct bio *bio)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos) {
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
                 bio_set_flag(bio, BIO_QOS_MERGED);
                 __rq_qos_merge(q->rq_qos, rq, bio);
         }
@@ -189,8 +183,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 static inline void rq_qos_queue_depth_changed(struct request_queue *q)
 {
-        if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
-            q->rq_qos)
+        if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
                 __rq_qos_queue_depth_changed(q->rq_qos);
 }
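
The removed hint is only a branch-weight annotation: in the kernel,
unlikely(x) expands to __builtin_expect(!!(x), 0). A standalone sketch of
the pull message's all-or-nothing point, assuming a hypothetical flag and
handler (not the blk-rq-qos code):

        #define my_unlikely(x)  __builtin_expect(!!(x), 0)  /* what unlikely() does */

        extern int qos_enabled;        /* hypothetical stand-in for the queue flag */
        extern void account_io(void);  /* hypothetical QoS hook */

        void submit_one(void)
        {
                /*
                 * The hint lays the QoS path out as cold. But on a queue
                 * with QoS configured this branch is taken on every I/O,
                 * so the hint pessimizes exactly the workloads that use
                 * QoS; with QoS off, the branch predictor handles the
                 * never-taken branch fine without any hint.
                 */
                if (my_unlikely(qos_enabled))
                        account_io();
        }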

drivers/block/loop.c

@@ -1225,16 +1225,28 @@ static int loop_clr_fd(struct loop_device *lo)
 }
 
 static int
-loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+loop_set_status(struct loop_device *lo, blk_mode_t mode,
+                struct block_device *bdev, const struct loop_info64 *info)
 {
         int err;
         bool partscan = false;
         bool size_changed = false;
         unsigned int memflags;
 
+        /*
+         * If we don't hold exclusive handle for the device, upgrade to it
+         * here to avoid changing device under exclusive owner.
+         */
+        if (!(mode & BLK_OPEN_EXCL)) {
+                err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
+                if (err)
+                        goto out_reread_partitions;
+        }
+
         err = mutex_lock_killable(&lo->lo_mutex);
         if (err)
-                return err;
+                goto out_abort_claiming;
         if (lo->lo_state != Lo_bound) {
                 err = -ENXIO;
                 goto out_unlock;
@@ -1273,6 +1285,10 @@ out_unfreeze:
         }
 out_unlock:
         mutex_unlock(&lo->lo_mutex);
+out_abort_claiming:
+        if (!(mode & BLK_OPEN_EXCL))
+                bd_abort_claiming(bdev, loop_set_status);
+out_reread_partitions:
         if (partscan)
                 loop_reread_partitions(lo);
@@ -1352,7 +1368,9 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
 }
 
 static int
-loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
+                    struct block_device *bdev,
+                    const struct loop_info __user *arg)
 {
         struct loop_info info;
         struct loop_info64 info64;
@@ -1360,17 +1378,19 @@ loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
         if (copy_from_user(&info, arg, sizeof (struct loop_info)))
                 return -EFAULT;
         loop_info64_from_old(&info, &info64);
-        return loop_set_status(lo, &info64);
+        return loop_set_status(lo, mode, bdev, &info64);
 }
 
 static int
-loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+loop_set_status64(struct loop_device *lo, blk_mode_t mode,
+                  struct block_device *bdev,
+                  const struct loop_info64 __user *arg)
 {
         struct loop_info64 info64;
 
         if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
                 return -EFAULT;
-        return loop_set_status(lo, &info64);
+        return loop_set_status(lo, mode, bdev, &info64);
 }
 
 static int
@@ -1549,14 +1569,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
         case LOOP_SET_STATUS:
                 err = -EPERM;
                 if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-                        err = loop_set_status_old(lo, argp);
+                        err = loop_set_status_old(lo, mode, bdev, argp);
                 break;
         case LOOP_GET_STATUS:
                 return loop_get_status_old(lo, argp);
         case LOOP_SET_STATUS64:
                 err = -EPERM;
                 if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
-                        err = loop_set_status64(lo, argp);
+                        err = loop_set_status64(lo, mode, bdev, argp);
                 break;
         case LOOP_GET_STATUS64:
                 return loop_get_status64(lo, argp);
@@ -1650,8 +1670,9 @@ loop_info64_to_compat(const struct loop_info64 *info64,
 }
 
 static int
-loop_set_status_compat(struct loop_device *lo,
-                       const struct compat_loop_info __user *arg)
+loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
+                       struct block_device *bdev,
+                       const struct compat_loop_info __user *arg)
 {
         struct loop_info64 info64;
         int ret;
@@ -1659,7 +1680,7 @@ loop_set_status_compat(struct loop_device *lo,
         ret = loop_info64_from_compat(arg, &info64);
         if (ret < 0)
                 return ret;
-        return loop_set_status(lo, &info64);
+        return loop_set_status(lo, mode, bdev, &info64);
 }
 
 static int
@@ -1685,7 +1706,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
         switch(cmd) {
         case LOOP_SET_STATUS:
-                err = loop_set_status_compat(lo,
+                err = loop_set_status_compat(lo, mode, bdev,
                         (const struct compat_loop_info __user *)arg);
                 break;
         case LOOP_GET_STATUS:
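
The claim dance above is the temporary-exclusive-claim pattern:
bd_prepare_to_claim() stakes a provisional claim under a holder cookie
(loop passes loop_set_status itself) so no other exclusive opener can slip
in, and it must always be paired with bd_abort_claiming() for the same
cookie, since the claim is never completed into a real exclusive open. A
sketch of the shape of the pattern, with a hypothetical caller:

        #include <linux/blkdev.h>

        /* Hypothetical reconfiguration helper following the pattern above. */
        static int reconfigure_bdev(struct block_device *bdev, blk_mode_t mode)
        {
                int err = 0;

                /*
                 * If the caller didn't open the device exclusively,
                 * stake a provisional claim (holder cookie = this
                 * function) so the device can't change hands while we
                 * reconfigure it.
                 */
                if (!(mode & BLK_OPEN_EXCL)) {
                        err = bd_prepare_to_claim(bdev, reconfigure_bdev, NULL);
                        if (err)
                                return err;
                }

                /* ... apply the configuration change ... */

                /* The claim is only provisional, so always undo it. */
                if (!(mode & BLK_OPEN_EXCL))
                        bd_abort_claiming(bdev, reconfigure_bdev);
                return err;
        }

The second loop fix in this pull is exactly about keeping that pairing
intact: the early exit when mutex_lock_killable() fails originally
returned without the matching bd_abort_claiming().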

drivers/block/ublk_drv.c

@@ -255,20 +255,6 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
                 u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
 static inline unsigned int ublk_req_build_flags(struct request *req);
 
-static void ublk_partition_scan_work(struct work_struct *work)
-{
-        struct ublk_device *ub =
-                container_of(work, struct ublk_device, partition_scan_work);
-
-        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
-                                             &ub->ub_disk->state)))
-                return;
-
-        mutex_lock(&ub->ub_disk->open_mutex);
-        bdev_disk_changed(ub->ub_disk, false);
-        mutex_unlock(&ub->ub_disk->open_mutex);
-}
-
 static inline struct ublksrv_io_desc *
 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
 {
@@ -1597,6 +1583,27 @@ static void ublk_put_disk(struct gendisk *disk)
                 put_device(disk_to_dev(disk));
 }
 
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+        struct ublk_device *ub =
+                container_of(work, struct ublk_device, partition_scan_work);
+        /* Hold disk reference to prevent UAF during concurrent teardown */
+        struct gendisk *disk = ublk_get_disk(ub);
+
+        if (!disk)
+                return;
+
+        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+                                             &disk->state)))
+                goto out;
+
+        mutex_lock(&disk->open_mutex);
+        bdev_disk_changed(disk, false);
+        mutex_unlock(&disk->open_mutex);
+out:
+        ublk_put_disk(disk);
+}
+
 /*
  * Use this function to ensure that ->canceling is consistently set for
  * the device and all queues. Do not set these flags directly.
@@ -2041,7 +2048,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
         mutex_lock(&ub->mutex);
         ublk_stop_dev_unlocked(ub);
         mutex_unlock(&ub->mutex);
-        flush_work(&ub->partition_scan_work);
+        cancel_work_sync(&ub->partition_scan_work);
         ublk_cancel_dev(ub);
 }
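
Two things make the reworked handler safe: it now takes its own disk
reference before touching the disk (ublk_get_disk()/ublk_put_disk() are the
driver's existing helpers), and teardown uses cancel_work_sync(), which
dequeues a still-pending work item as well as waiting out a running one,
whereas flush_work() would have run a pending scan against a disk that is
going away. A sketch of the general rule for async work versus teardown,
with hypothetical names:

        #include <linux/container_of.h>
        #include <linux/workqueue.h>

        struct my_disk;                         /* hypothetical refcounted disk */

        struct my_dev {                         /* hypothetical device */
                struct work_struct scan_work;
        };

        struct my_disk *my_dev_get_disk(struct my_dev *dev); /* hypothetical: NULL if tearing down */
        void my_disk_put(struct my_disk *disk);              /* hypothetical */
        void do_scan(struct my_disk *disk);                  /* hypothetical */

        static void scan_work_fn(struct work_struct *work)
        {
                struct my_dev *dev = container_of(work, struct my_dev, scan_work);
                /*
                 * The queuer's reference may already be gone by the time
                 * the work runs, so the handler must hold its own.
                 */
                struct my_disk *disk = my_dev_get_disk(dev);

                if (!disk)
                        return;         /* device already torn down */
                do_scan(disk);
                my_disk_put(disk);
        }

        static void my_dev_stop(struct my_dev *dev)
        {
                /*
                 * cancel_work_sync() removes a pending item and waits for
                 * a running one, so nothing scans after this returns.
                 */
                cancel_work_sync(&dev->scan_work);
        }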