Driver core fixes for 6.18-rc3

Merge tag 'driver-core-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core

Pull driver core fixes from Danilo Krummrich:

 - In Device::parent(), do not make any assumptions on the device
   context of the parent device

 - Check visibility before changing ownership of a sysfs attribute
   group

 - In topology_parse_cpu_capacity(), replace an incorrect usage of
   PTR_ERR_OR_ZERO() with IS_ERR_OR_NULL()

 - In devcoredump, fix a circular locking dependency between
   struct devcd_entry::mutex and kernfs

 - Do not warn about a pending fw_devlink sync state

* tag 'driver-core-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core:
  arch_topology: Fix incorrect error check in topology_parse_cpu_capacity()
  rust: device: fix device context of Device::parent()
  sysfs: check visibility before changing group attribute ownership
  devcoredump: Fix circular locking dependency with devcd->mutex.
  driver core: fw_devlink: Don't warn about sync_state() pending
Committed by Linus Torvalds on 2025-10-25 11:03:46 -07:00 as commit 72761a7e31.
6 changed files with 109 additions and 69 deletions.

drivers/base/arch_topology.c

@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 	 * frequency (by keeping the initial capacity_freq_ref value).
 	 */
 	cpu_clk = of_clk_get(cpu_node, 0);
-	if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+	if (!IS_ERR_OR_NULL(cpu_clk)) {
 		per_cpu(capacity_freq_ref, cpu) =
 			clk_get_rate(cpu_clk) / HZ_PER_KHZ;
 		clk_put(cpu_clk);
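Why the old check was wrong, in brief: PTR_ERR_OR_ZERO() maps an ERR_PTR() to its errno and everything else, including NULL, to 0, so !PTR_ERR_OR_ZERO(cpu_clk) also accepted a NULL clock. A minimal sketch of the difference (the clk_usable() helper is hypothetical, for illustration only):

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical helper contrasting the two checks. */
static bool clk_usable(struct clk *clk)
{
	/*
	 * !PTR_ERR_OR_ZERO(clk) is true for a valid pointer, but also for
	 * NULL, because PTR_ERR_OR_ZERO(NULL) == 0.
	 *
	 * !IS_ERR_OR_NULL(clk) is true only for a valid pointer; it
	 * rejects both ERR_PTR(-E...) values and NULL.
	 */
	return !IS_ERR_OR_NULL(clk);
}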

drivers/base/core.c

@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
 		return 0;
 
 	if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
-		dev_warn(sup, "sync_state() pending due to %s\n",
+		dev_info(sup, "sync_state() pending due to %s\n",
 			 dev_name(link->consumer));
 		return 0;
 	}

drivers/base/devcoredump.c

@@ -23,50 +23,46 @@ struct devcd_entry {
 	void *data;
 	size_t datalen;
 	/*
-	 * Here, mutex is required to serialize the calls to del_wk work between
-	 * user/kernel space which happens when devcd is added with device_add()
-	 * and that sends uevent to user space. User space reads the uevents,
-	 * and calls to devcd_data_write() which try to modify the work which is
-	 * not even initialized/queued from devcoredump.
+	 * There are 2 races for which mutex is required.
 	 *
+	 * The first race is between device creation and userspace writing to
+	 * schedule immediate destruction.
 	 *
+	 * This race is handled by arming the timer before device creation, but
+	 * when device creation fails the timer still exists.
 	 *
-	 *        cpu0(X)                                 cpu1(Y)
+	 * To solve this, hold the mutex during device_add(), and set
+	 * init_completed on success before releasing the mutex.
 	 *
-	 * dev_coredump() uevent sent to user space
-	 * device_add()  ======================> user space process Y reads the
-	 *                                       uevents writes to devcd fd
-	 *                                       which results into writes to
+	 * That way the timer will never fire until device_add() is called;
+	 * it will do nothing if init_completed is not set. The timer is also
+	 * cancelled in that case.
 	 *
-	 *                                       devcd_data_write()
-	 *                                         mod_delayed_work()
-	 *                                           try_to_grab_pending()
-	 *                                             timer_delete()
-	 *                                               debug_assert_init()
-	 * INIT_DELAYED_WORK()
-	 * schedule_delayed_work()
-	 *
-	 *
-	 * Also, mutex alone would not be enough to avoid scheduling of
-	 * del_wk work after it get flush from a call to devcd_free()
-	 * mentioned as below.
-	 *
-	 *	disabled_store()
-	 *        devcd_free()
-	 *         mutex_lock()             devcd_data_write()
-	 *         flush_delayed_work()
-	 *         mutex_unlock()
-	 *                                   mutex_lock()
-	 *                                   mod_delayed_work()
-	 *                                   mutex_unlock()
-	 * So, delete_work flag is required.
+	 * The second race involves multiple parallel invocations of devcd_free();
+	 * add a deleted flag so only one can call the destructor.
 	 */
 	struct mutex mutex;
-	bool delete_work;
+	bool init_completed, deleted;
 	struct module *owner;
 	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
 			void *data, size_t datalen);
 	void (*free)(void *data);
+	/*
+	 * If nothing interferes and device_add() returns success,
+	 * del_wk will destroy the device after the timer fires.
+	 *
+	 * Multiple userspace processes can interfere in the working of the timer:
+	 * - Writing to the coredump will reschedule the timer to run immediately,
+	 *   if still armed.
+	 *
+	 *   This is handled by using "if (cancel_delayed_work()) {
+	 *   schedule_delayed_work() }", to prevent re-arming after having
+	 *   been previously fired.
+	 * - Writing to /sys/class/devcoredump/disabled will destroy the
+	 *   coredump synchronously.
+	 *   This is handled by using disable_delayed_work_sync(), and then
+	 *   checking if the deleted flag is set with &devcd->mutex held.
+	 */
 	struct delayed_work del_wk;
 	struct device *failing_dev;
 };
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
 	kfree(devcd);
 }
 
+static void __devcd_del(struct devcd_entry *devcd)
+{
+	devcd->deleted = true;
+	device_del(&devcd->devcd_dev);
+	put_device(&devcd->devcd_dev);
+}
+
 static void devcd_del(struct work_struct *wk)
 {
 	struct devcd_entry *devcd;
+	bool init_completed;
 
 	devcd = container_of(wk, struct devcd_entry, del_wk.work);
 
-	device_del(&devcd->devcd_dev);
-	put_device(&devcd->devcd_dev);
+	/* devcd->mutex serializes against dev_coredumpm_timeout */
+	mutex_lock(&devcd->mutex);
+	init_completed = devcd->init_completed;
+	mutex_unlock(&devcd->mutex);
+
+	if (init_completed)
+		__devcd_del(devcd);
 }
 
 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
-	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work) {
-		devcd->delete_work = true;
-		mod_delayed_work(system_wq, &devcd->del_wk, 0);
-	}
-	mutex_unlock(&devcd->mutex);
+	/*
+	 * Although it's tempting to use mod_delayed_work() here,
+	 * that will cause a reschedule if the timer already fired.
+	 */
+	if (cancel_delayed_work(&devcd->del_wk))
+		schedule_delayed_work(&devcd->del_wk, 0);
 
 	return count;
 }
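The new comment above is the crux of the fix: mod_delayed_work() unconditionally (re)queues the work, while cancel_delayed_work() returns true only if the work was still pending. A sketch of the two idioms (the run_now_*() helpers are illustrative, not part of the patch):

#include <linux/workqueue.h>

/*
 * Re-arms even when the timer already fired and the work ran, so a
 * destructor scheduled this way could run twice.
 */
static void run_now_unsafe(struct delayed_work *dwork)
{
	mod_delayed_work(system_wq, dwork, 0);
}

/*
 * cancel_delayed_work() returns false once the timer has fired, so the
 * work is rescheduled only while still pending and runs at most once.
 */
static void run_now_safe(struct delayed_work *dwork)
{
	if (cancel_delayed_work(dwork))
		schedule_delayed_work(dwork, 0);
}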
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
 {
 	struct devcd_entry *devcd = dev_to_devcd(dev);
 
+	/*
+	 * To prevent a race with devcd_data_write(), disable the work and
+	 * complete it manually instead.
+	 *
+	 * We cannot rely on the return value of
+	 * disable_delayed_work_sync() here, because it might be in the
+	 * middle of a cancel_delayed_work + schedule_delayed_work pair.
+	 *
+	 * devcd->mutex here guards against multiple parallel invocations
+	 * of devcd_free().
+	 */
+	disable_delayed_work_sync(&devcd->del_wk);
 	mutex_lock(&devcd->mutex);
-	if (!devcd->delete_work)
-		devcd->delete_work = true;
-
-	flush_delayed_work(&devcd->del_wk);
+	if (!devcd->deleted)
+		__devcd_del(devcd);
 	mutex_unlock(&devcd->mutex);
 	return 0;
 }
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
  *                                                 put_device() <- last reference
  *             error = fn(dev, data)               devcd_dev_release()
  *             devcd_free(dev, data)               kfree(devcd)
- *             mutex_lock(&devcd->mutex);
  *
  *
  * In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
  * put_device(). However, this will not happens as fn(dev, data) runs
  * with its own reference to device via klist_node so it is not its last reference.
  * so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 	devcd->read = read;
 	devcd->free = free;
 	devcd->failing_dev = get_device(dev);
-	devcd->delete_work = false;
+	devcd->deleted = false;
 
 	mutex_init(&devcd->mutex);
 	device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 		      atomic_inc_return(&devcd_count));
 	devcd->devcd_dev.class = &devcd_class;
 
-	mutex_lock(&devcd->mutex);
 	dev_set_uevent_suppress(&devcd->devcd_dev, true);
 
+	/* devcd->mutex prevents devcd_del() completing until init finishes */
+	mutex_lock(&devcd->mutex);
+	devcd->init_completed = false;
+
+	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+	schedule_delayed_work(&devcd->del_wk, timeout);
+
 	if (device_add(&devcd->devcd_dev))
 		goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 	dev_set_uevent_suppress(&devcd->devcd_dev, false);
 	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
-	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
-	schedule_delayed_work(&devcd->del_wk, timeout);
+
+	/*
+	 * Safe to run devcd_del() now that we are done with devcd_dev.
+	 * Alternatively we could have taken a ref on devcd_dev before
+	 * dropping the lock.
+	 */
+	devcd->init_completed = true;
 	mutex_unlock(&devcd->mutex);
 	return;
 
  put_device:
-	put_device(&devcd->devcd_dev);
 	mutex_unlock(&devcd->mutex);
+	cancel_delayed_work_sync(&devcd->del_wk);
+	put_device(&devcd->devcd_dev);
+
  put_module:
 	module_put(owner);
  free:

fs/sysfs/group.c

@@ -498,17 +498,26 @@ int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
 }
 EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj);
 
-static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
+static int sysfs_group_attrs_change_owner(struct kobject *kobj,
+					  struct kernfs_node *grp_kn,
 					  const struct attribute_group *grp,
 					  struct iattr *newattrs)
 {
 	struct kernfs_node *kn;
-	int error;
+	int error, i;
+	umode_t mode;
 
 	if (grp->attrs) {
 		struct attribute *const *attr;
 
-		for (attr = grp->attrs; *attr; attr++) {
+		for (i = 0, attr = grp->attrs; *attr; i++, attr++) {
+			if (grp->is_visible) {
+				mode = grp->is_visible(kobj, *attr, i);
+				if (mode & SYSFS_GROUP_INVISIBLE)
+					break;
+				if (!mode)
+					continue;
+			}
+
 			kn = kernfs_find_and_get(grp_kn, (*attr)->name);
 			if (!kn)
 				return -ENOENT;
@@ -523,7 +532,14 @@ static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
 	if (grp->bin_attrs) {
 		const struct bin_attribute *const *bin_attr;
 
-		for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) {
+		for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) {
+			if (grp->is_bin_visible) {
+				mode = grp->is_bin_visible(kobj, *bin_attr, i);
+				if (mode & SYSFS_GROUP_INVISIBLE)
+					break;
+				if (!mode)
+					continue;
+			}
+
 			kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name);
 			if (!kn)
 				return -ENOENT;
@@ -573,7 +589,7 @@ int sysfs_group_change_owner(struct kobject *kobj,
 
 	error = kernfs_setattr(grp_kn, &newattrs);
 	if (!error)
-		error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs);
+		error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs);
 
 	kernfs_put(grp_kn);
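For context, an is_visible() callback returns the attribute's mode, 0 to hide that single attribute, or a mode or'ed with SYSFS_GROUP_INVISIBLE to hide the whole group; these are exactly the three cases the ownership change now honours. A sketch of such a group (the demo_* names and the feature flag are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static bool demo_has_feature;	/* hypothetical capability flag */

static DEVICE_ATTR_RO(foo);	/* assumes a foo_show() defined elsewhere */

static umode_t demo_attr_visible(struct kobject *kobj,
				 struct attribute *attr, int n)
{
	if (!demo_has_feature)
		return 0;	/* hide this one attribute */
	return attr->mode;	/* keep the declared mode */
}

static struct attribute *demo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs		= demo_attrs,
	.is_visible	= demo_attr_visible,
};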

rust/kernel/auxiliary.rs

@@ -217,13 +217,7 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
     /// Returns a reference to the parent [`device::Device`], if any.
     pub fn parent(&self) -> Option<&device::Device> {
-        let ptr: *const Self = self;
-
-        // CAST: `Device<Ctx: DeviceContext>` types are transparent to each other.
-        let ptr: *const Device = ptr.cast();
-
-        // SAFETY: `ptr` was derived from `&self`.
-        let this = unsafe { &*ptr };
-
-        this.as_ref().parent()
+        self.as_ref().parent()
     }
 }

rust/kernel/device.rs

@@ -251,7 +251,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
     /// Returns a reference to the parent device, if any.
     #[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))]
-    pub(crate) fn parent(&self) -> Option<&Self> {
+    pub(crate) fn parent(&self) -> Option<&Device> {
         // SAFETY:
         // - By the type invariant `self.as_raw()` is always valid.
         // - The parent device is only ever set at device creation.
@@ -264,7 +264,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
         // - Since `parent` is not NULL, it must be a valid pointer to a `struct device`.
         // - `parent` is valid for the lifetime of `self`, since a `struct device` holds a
         //   reference count of its parent.
-        Some(unsafe { Self::from_raw(parent) })
+        Some(unsafe { Device::from_raw(parent) })
     }
 }