
block-6.19-20260102

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmlX7MMQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpvuYEACG0VFYmcqmB4JZygecJB3xaxhbVIrCbjFv
 Vmc0XNTkcCpjYAv1jpkS5F3nkJhzZlFNn9xOaP/O8E+6tSctFIre7qjMRpxZM3yl
 GA+MqPI+zNbpYMgsoAH/XTASTVfaTEPOlaoAPQeo8Ey3JRw3Ko1IDNU7zIYK94Xl
 rSAeT65W7vJ+HBjctBoCZYMsE2x0Sn0yrVctkL1mMusQwIg6oMhJ1w1p36P17Mc1
 YgLWQYtfK+eogdTM0Jh9RvDtVJL3WT1I2Ii3KBdCgryY7iSxFXvM0pm1lrOBH+kI
 4bKHTylBnjfmxv7dlz3jHwRmahwdXDk7rpq1EMPygDSj835h3SgAFz3rm9nCUjNI
 xWyEZeN6z4ykdOlqJ6ghTnZTroRdM/12HbSV46n69tczxepG3Mn1i3gBd4UQhn5T
 z6aqa7akIsynlzOnLgrwQjxgVhtfAHptrgAg7g7Kz9hq9xTAEPc2f9Nq7glmLP6f
 wPMoy2lla69vk4Tlzh8TZpTHRPLYLHTtL5OQPM6dnyQ6MzWm2/PHJ/MNfV7/o+VR
 W61BYXUz6d2q81c/I16VWVQvJ0nUa3v7hUGCLUeimQUg+ulyIlMX4wrOI7iYTFTy
 V/4c3DHKEh9y/ptmCgv0jDZdwSoUYvXkn0vFe0fcF3q/T7xea4dok8mcXLcKhMuc
 xPFtx92dhQ==
 =4NB3
 -----END PGP SIGNATURE-----

Merge tag 'block-6.19-20260102' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Scan partition tables asynchronously for ublk, as nvme already does
   for the same reason: it avoids potential deadlocks if the ublk server
   errors out while the scan is in flight. Includes a set of selftests
   as well (a short usage sketch follows the test_common.sh hunk below).

 - MD pull request via Yu:
     - Fix null-pointer dereference in raid5 sysfs group_thread_cnt
       store (Tuo Li)
     - Fix possible mempool corruption during raid1 raid_disks update
       via sysfs (FengWei Shih)
     - Fix logical_block_size configuration being overwritten during
       super_1_validate() (Li Nan)
     - Fix forward incompatibility with configurable logical block size:
       arrays assembled on new kernels could not be assembled on older
       kernels (v6.18 and before) due to non-zero reserved pad rejection
       (Li Nan); see the sysfs sketch after the md.c hunks below
     - Fix static checker warning about iterator not incremented (Li Nan)

 - Skip CPU offlining notifications on unmapped hardware queues

 - bfq-iosched fix for aux block stats being accumulated into the wrong
   destination

 - Fix outdated comment in bfq-iosched

* tag 'block-6.19-20260102' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  block, bfq: update outdated comment
  blk-mq: skip CPU offline notify on unmapped hctx
  selftests/ublk: fix Makefile to rebuild on header changes
  selftests/ublk: add test for async partition scan
  ublk: scan partition in async way
  block,bfq: fix aux stat accumulation destination
  md: Fix forward incompatibility from configurable logical block size
  md: Fix logical_block_size configuration being overwritten
  md: suspend array while updating raid_disks via sysfs
  md/raid5: fix possible null-pointer dereferences in raid5_store_group_thread_cnt()
  md: Fix static checker warning in analyze_sbs
commit bea82c80a5
Author: Linus Torvalds
Date:   2026-01-02 12:15:59 -08:00

9 changed files with 174 additions and 27 deletions

block/bfq-cgroup.c

@@ -380,7 +380,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
         blkg_rwstat_add_aux(&to->merged, &from->merged);
         blkg_rwstat_add_aux(&to->service_time, &from->service_time);
         blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-        bfq_stat_add_aux(&from->time, &from->time);
+        bfq_stat_add_aux(&to->time, &from->time);
         bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
         bfq_stat_add_aux(&to->avg_queue_size_samples,
                          &from->avg_queue_size_samples);

block/bfq-iosched.h

@@ -984,7 +984,7 @@ struct bfq_group_data {
  * unused for the root group. Used to know whether there
  * are groups with more than one active @bfq_entity
  * (see the comments to the function
- * bfq_bfqq_may_idle()).
+ * bfq_better_to_idle()).
  * @rq_pos_tree: rbtree sorted by next_request position, used when
  * determining if two or more queues have interleaving
  * requests (see bfq_find_close_cooperator()).

block/blk-mq.c

@@ -3721,7 +3721,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
                         struct blk_mq_hw_ctx, cpuhp_online);
         int ret = 0;
 
-        if (blk_mq_hctx_has_online_cpu(hctx, cpu))
+        if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
                 return 0;
 
         /*

drivers/block/ublk_drv.c

@@ -237,6 +237,7 @@ struct ublk_device {
         bool canceling;
         pid_t ublksrv_tgid;
         struct delayed_work exit_work;
+        struct work_struct partition_scan_work;
 
         struct ublk_queue *queues[];
 };
@@ -254,6 +255,20 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
                 u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
 static inline unsigned int ublk_req_build_flags(struct request *req);
 
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+        struct ublk_device *ub =
+                container_of(work, struct ublk_device, partition_scan_work);
+
+        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+                                             &ub->ub_disk->state)))
+                return;
+
+        mutex_lock(&ub->ub_disk->open_mutex);
+        bdev_disk_changed(ub->ub_disk, false);
+        mutex_unlock(&ub->ub_disk->open_mutex);
+}
+
 static inline struct ublksrv_io_desc *
 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
 {
@@ -2026,6 +2041,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
         mutex_lock(&ub->mutex);
         ublk_stop_dev_unlocked(ub);
         mutex_unlock(&ub->mutex);
+        flush_work(&ub->partition_scan_work);
         ublk_cancel_dev(ub);
 }
 
@@ -2954,9 +2970,17 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
 
         ublk_apply_params(ub);
 
-        /* don't probe partitions if any daemon task is un-trusted */
-        if (ub->unprivileged_daemons)
-                set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+        /*
+         * Suppress partition scan to avoid potential IO hang.
+         *
+         * If ublk server error occurs during partition scan, the IO may
+         * wait while holding ub->mutex, which can deadlock with other
+         * operations that need the mutex. Defer partition scan to async
+         * work.
+         * For unprivileged daemons, keep GD_SUPPRESS_PART_SCAN set
+         * permanently.
+         */
+        set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
 
         ublk_get_device(ub);
         ub->dev_info.state = UBLK_S_DEV_LIVE;
@@ -2973,6 +2997,10 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
 
         set_bit(UB_STATE_USED, &ub->state);
 
+        /* Schedule async partition scan for trusted daemons */
+        if (!ub->unprivileged_daemons)
+                schedule_work(&ub->partition_scan_work);
+
 out_put_cdev:
         if (ret) {
                 ublk_detach_disk(ub);
@@ -3138,6 +3166,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
         mutex_init(&ub->mutex);
         spin_lock_init(&ub->lock);
         mutex_init(&ub->cancel_mutex);
+        INIT_WORK(&ub->partition_scan_work, ublk_partition_scan_work);
 
         ret = ublk_alloc_dev_number(ub, header->dev_id);
         if (ret < 0)

drivers/md/md.c

@@ -1999,7 +1999,6 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
                 mddev->layout = le32_to_cpu(sb->layout);
                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                 mddev->dev_sectors = le64_to_cpu(sb->size);
-                mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
                 mddev->events = ev1;
                 mddev->bitmap_info.offset = 0;
                 mddev->bitmap_info.space = 0;
@@ -2015,6 +2014,9 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
                         mddev->max_disks = (4096-256)/2;
 
+                if (!mddev->logical_block_size)
+                        mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
+
                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                     mddev->bitmap_info.file == NULL) {
                         mddev->bitmap_info.offset =
@@ -3882,7 +3884,6 @@ out_free_rdev:
 static int analyze_sbs(struct mddev *mddev)
 {
-        int i;
         struct md_rdev *rdev, *freshest, *tmp;
 
         freshest = NULL;
@@ -3909,11 +3910,9 @@ static int analyze_sbs(struct mddev *mddev)
                 super_types[mddev->major_version].
                         validate_super(mddev, NULL/*freshest*/, freshest);
 
-        i = 0;
         rdev_for_each_safe(rdev, tmp, mddev) {
                 if (mddev->max_disks &&
-                    (rdev->desc_nr >= mddev->max_disks ||
-                     i > mddev->max_disks)) {
+                    rdev->desc_nr >= mddev->max_disks) {
                         pr_warn("md: %s: %pg: only %d devices permitted\n",
                                 mdname(mddev), rdev->bdev,
                                 mddev->max_disks);
@@ -4407,7 +4406,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
         if (err < 0)
                 return err;
 
-        err = mddev_lock(mddev);
+        err = mddev_suspend_and_lock(mddev);
         if (err)
                 return err;
         if (mddev->pers)
@@ -4432,7 +4431,7 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
         } else
                 mddev->raid_disks = n;
 out_unlock:
-        mddev_unlock(mddev);
+        mddev_unlock_and_resume(mddev);
         return err ? err : len;
 }
 static struct md_sysfs_entry md_raid_disks =
@@ -5981,13 +5980,33 @@ lbs_store(struct mddev *mddev, const char *buf, size_t len)
         if (mddev->major_version == 0)
                 return -EINVAL;
 
-        if (mddev->pers)
-                return -EBUSY;
-
         err = kstrtouint(buf, 10, &lbs);
         if (err < 0)
                 return -EINVAL;
 
+        if (mddev->pers) {
+                unsigned int curr_lbs;
+
+                if (mddev->logical_block_size)
+                        return -EBUSY;
+
+                /*
+                 * To fix forward compatibility issues, LBS is not
+                 * configured for arrays from old kernels (<=6.18) by default.
+                 * If the user confirms no rollback to old kernels,
+                 * enable LBS by writing current LBS to prevent data
+                 * loss from LBS changes.
+                 */
+                curr_lbs = queue_logical_block_size(mddev->gendisk->queue);
+                if (lbs != curr_lbs)
+                        return -EINVAL;
+
+                mddev->logical_block_size = curr_lbs;
+                set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+                pr_info("%s: logical block size configured successfully, array will not be assembled in old kernels (<= 6.18)\n",
+                        mdname(mddev));
+                return len;
+        }
+
         err = mddev_lock(mddev);
         if (err)
                 goto unlock;
@@ -6163,7 +6182,27 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
                         mdname(mddev));
                 return -EINVAL;
         }
-        mddev->logical_block_size = lim->logical_block_size;
+
+        /* Only 1.x meta needs to set logical block size */
+        if (mddev->major_version == 0)
+                return 0;
+
+        /*
+         * Fix forward compatibility issue. Only set LBS by default for
+         * new arrays, mddev->events == 0 indicates the array was just
+         * created. When assembling an array, read LBS from the superblock
+         * instead. LBS is 0 in superblocks created by old kernels.
+         */
+        if (!mddev->events) {
+                pr_info("%s: array will not be assembled in old kernels that lack configurable LBS support (<= 6.18)\n",
+                        mdname(mddev));
+                mddev->logical_block_size = lim->logical_block_size;
+        }
+
+        if (!mddev->logical_block_size)
+                pr_warn("%s: echo current LBS to md/logical_block_size to prevent data loss issues from LBS changes.\n"
+                        "\tNote: After setting, array will not be assembled in old kernels (<= 6.18)\n",
+                        mdname(mddev));
+
         return 0;
 }
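
For reference, a minimal sysfs sketch of the logical-block-size opt-in that
the md.c hunks above describe (the array name md0 is illustrative; the
attribute path follows the pr_warn hint in mddev_stack_rdev_limits):

    # Array created on an old kernel (<= 6.18), already assembled and running
    lbs=$(cat /sys/block/md0/queue/logical_block_size)   # current LBS, e.g. 512
    echo "$lbs" > /sys/block/md0/md/logical_block_size   # record it in the superblock
    # Only the current LBS value is accepted; once recorded, the array can no
    # longer be assembled on kernels <= 6.18.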

drivers/md/raid5.c

@@ -7187,12 +7187,14 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
         err = mddev_suspend_and_lock(mddev);
         if (err)
                 return err;
 
+        conf = mddev->private;
+        if (!conf) {
+                mddev_unlock_and_resume(mddev);
+                return -ENODEV;
+        }
         raid5_quiesce(mddev, true);
-        conf = mddev->private;
-        if (!conf)
-                err = -ENODEV;
-        else if (new != conf->worker_cnt_per_group) {
+        if (new != conf->worker_cnt_per_group) {
                 old_groups = conf->worker_groups;
                 if (old_groups)
                         flush_workqueue(raid5_wq);

tools/testing/selftests/ublk/Makefile

@@ -22,6 +22,7 @@ TEST_PROGS += test_generic_11.sh
 TEST_PROGS += test_generic_12.sh
 TEST_PROGS += test_generic_13.sh
 TEST_PROGS += test_generic_14.sh
+TEST_PROGS += test_generic_15.sh
 
 TEST_PROGS += test_null_01.sh
 TEST_PROGS += test_null_02.sh
@@ -50,10 +51,10 @@ TEST_PROGS += test_stress_07.sh
 
 TEST_GEN_PROGS_EXTENDED = kublk
 
+LOCAL_HDRS += $(wildcard *.h)
 include ../lib.mk
 
-$(TEST_GEN_PROGS_EXTENDED): kublk.c null.c file_backed.c common.c stripe.c \
-        fault_inject.c
+$(TEST_GEN_PROGS_EXTENDED): $(wildcard *.c)
 
 check:
         shellcheck -x -f gcc *.sh

tools/testing/selftests/ublk/test_common.sh

@@ -178,8 +178,9 @@ _have_feature()
 _create_ublk_dev() {
         local dev_id;
         local cmd=$1
+        local settle=$2
 
-        shift 1
+        shift 2
 
         if [ ! -c /dev/ublk-control ]; then
                 return ${UBLK_SKIP_CODE}
@@ -194,7 +195,10 @@ _create_ublk_dev() {
                 echo "fail to add ublk dev $*"
                 return 255
         fi
-        udevadm settle
+
+        if [ "$settle" = "yes" ]; then
+                udevadm settle
+        fi
 
         if [[ "$dev_id" =~ ^[0-9]+$ ]]; then
                 echo "${dev_id}"
@@ -204,14 +208,18 @@ _create_ublk_dev() {
 }
 
 _add_ublk_dev() {
-        _create_ublk_dev "add" "$@"
+        _create_ublk_dev "add" "yes" "$@"
+}
+
+_add_ublk_dev_no_settle() {
+        _create_ublk_dev "add" "no" "$@"
 }
 
 _recover_ublk_dev() {
         local dev_id
         local state
 
-        dev_id=$(_create_ublk_dev "recover" "$@")
+        dev_id=$(_create_ublk_dev "recover" "yes" "$@")
         for ((j=0;j<20;j++)); do
                 state=$(_get_ublk_dev_state "${dev_id}")
                 [ "$state" == "LIVE" ] && break

tools/testing/selftests/ublk/test_generic_15.sh (new file)

@@ -0,0 +1,68 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_15"
+ERR_CODE=0
+
+_test_partition_scan_no_hang()
+{
+        local recovery_flag=$1
+        local expected_state=$2
+        local dev_id
+        local state
+        local daemon_pid
+        local start_time
+        local elapsed
+
+        # Create ublk device with fault_inject target and very large delay
+        # to simulate hang during partition table read
+        # --delay_us 60000000 = 60 seconds delay
+        # Use _add_ublk_dev_no_settle to avoid udevadm settle hang waiting
+        # for partition scan events to complete
+        if [ "$recovery_flag" = "yes" ]; then
+                echo "Testing partition scan with recovery support..."
+                dev_id=$(_add_ublk_dev_no_settle -t fault_inject -q 1 -d 1 --delay_us 60000000 -r 1)
+        else
+                echo "Testing partition scan without recovery..."
+                dev_id=$(_add_ublk_dev_no_settle -t fault_inject -q 1 -d 1 --delay_us 60000000)
+        fi
+        _check_add_dev "$TID" $?
+
+        # The add command should return quickly because partition scan is async.
+        # Now sleep briefly to let the async partition scan work start and hit
+        # the delay in the fault_inject handler.
+        sleep 1
+
+        # Kill the ublk daemon while partition scan is potentially blocked
+        # And check state transitions properly
+        start_time=${SECONDS}
+        daemon_pid=$(_get_ublk_daemon_pid "${dev_id}")
+        state=$(__ublk_kill_daemon "${dev_id}" "${expected_state}")
+        elapsed=$((SECONDS - start_time))
+
+        # Verify the device transitioned to expected state
+        if [ "$state" != "${expected_state}" ]; then
+                echo "FAIL: Device state is $state, expected ${expected_state}"
+                ERR_CODE=255
+                ${UBLK_PROG} del -n "${dev_id}" > /dev/null 2>&1
+                return
+        fi
+
+        echo "PASS: Device transitioned to ${expected_state} in ${elapsed}s without hanging"
+
+        # Clean up the device
+        ${UBLK_PROG} del -n "${dev_id}" > /dev/null 2>&1
+}
+
+_prep_test "partition_scan" "verify async partition scan prevents IO hang"
+
+# Test 1: Without recovery support - should transition to DEAD
+_test_partition_scan_no_hang "no" "DEAD"
+
+# Test 2: With recovery support - should transition to QUIESCED
+_test_partition_scan_no_hang "yes" "QUIESCED"
+
+_cleanup_test "partition_scan"
+
+_show_result $TID $ERR_CODE
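
For completeness, the new case can be run through the usual kselftest entry
point (a sketch; assumes a configured kernel source tree):

    # Build and run the ublk selftests, including generic_15
    make -C tools/testing/selftests TARGETS=ublk run_tests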