1
0
mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2026-01-12 01:20:14 +00:00

futex: Convert to get/put_user_inline()

Replace the open coded implementation with the new get/put_user_inline()
helpers. This might be replaced by a regular get/put_user(), but that needs
a proper performance evaluation.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251027083745.736737934@linutronix.de
This commit is contained in:
Thomas Gleixner 2025-10-27 09:44:00 +01:00 committed by Ingo Molnar
parent b2cfc0cd68
commit e4e28fd698
2 changed files with 5 additions and 57 deletions

View File

@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
if (flags & FLAGS_NUMA) {
u32 __user *naddr = (void *)uaddr + size / 2;
if (futex_get_value(&node, naddr))
if (get_user_inline(node, naddr))
return -EFAULT;
if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
node = numa_node_id();
node_updated = true;
}
if (node_updated && futex_put_value(node, naddr))
if (node_updated && put_user_inline(node, naddr))
return -EFAULT;
}

View File

@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
return ret;
}
/*
* This does a plain atomic user space read, and the user pointer has
* already been verified earlier by get_futex_key() to be both aligned
* and actually in user space, just like futex_atomic_cmpxchg_inatomic().
*
* We still want to avoid any speculation, and while __get_user() is
* the traditional model for this, it's actually slower than doing
* this manually these days.
*
* We could just have a per-architecture special function for it,
* the same way we do futex_atomic_cmpxchg_inatomic(), but rather
* than force everybody to do that, write it out long-hand using
* the low-level user-access infrastructure.
*
* This looks a bit overkill, but generally just results in a couple
* of instructions.
*/
static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
{
u32 val;
if (can_do_masked_user_access())
from = masked_user_access_begin(from);
else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(val, from, Efault);
user_read_access_end();
*dest = val;
return 0;
Efault:
user_read_access_end();
return -EFAULT;
}
static __always_inline int futex_put_value(u32 val, u32 __user *to)
{
if (can_do_masked_user_access())
to = masked_user_access_begin(to);
else if (!user_write_access_begin(to, sizeof(*to)))
return -EFAULT;
unsafe_put_user(val, to, Efault);
user_write_access_end();
return 0;
Efault:
user_write_access_end();
return -EFAULT;
}
/* Read from user memory with pagefaults disabled */
static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
{
int ret;
pagefault_disable();
ret = futex_get_value(dest, from);
pagefault_enable();
return ret;
guard(pagefault)();
return get_user_inline(*dest, from);
}
extern void __futex_unqueue(struct futex_q *q);