
net: Add rfs_is_needed() helper

Add a helper to check whether RFS is needed. This makes the code a bit
cleaner and lets the next patch have MPTCP use the helper to decide
whether or not to iterate over the subflows.

tun_flow_update() was calling sock_rps_record_flow_hash() regardless of
the state of rfs_needed. This was not really a bug, as sock_flow_table
simply ends up being NULL and nothing gets recorded. As a side effect,
this commit also makes tun_flow_update() respect the state of
rfs_needed.
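
For context, a simplified before/after sketch of that call path (paraphrased
from the pre-patch sock_rps_record_flow_hash() partially visible in the diff
below; the net_hotdata.rps_sock_flow_table lookup is filled in from the
surrounding code and the old_/new_ names are made up for illustration):

	/* Pre-patch shape: harmless, but takes rcu_read_lock() only to find
	 * a NULL table whenever RFS has never been configured.
	 */
	static inline void old_record_flow_hash(__u32 hash)
	{
		struct rps_sock_flow_table *sock_flow_table;

		if (!hash)
			return;
		rcu_read_lock();
		sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
		if (sock_flow_table)	/* NULL while RFS is unconfigured */
			rps_record_sock_flow(sock_flow_table, hash);
		rcu_read_unlock();
	}

	/* Post-patch: callers such as tun_flow_update() go through the new
	 * wrapper, which returns before taking the RCU read lock while the
	 * rfs_needed static key is disabled.
	 */
	static inline void new_record_flow_hash(__u32 hash)
	{
		if (!rfs_is_needed())
			return;

		_sock_rps_record_flow_hash(hash);
	}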

Suggested-by: Matthieu Baerts <matttbe@kernel.org>
Signed-off-by: Christoph Paasch <cpaasch@openai.com>
Acked-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20250902-net-next-mptcp-misc-feat-6-18-v2-3-fa02bb3188b1@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Christoph Paasch, 2025-09-02 23:11:35 +02:00 (committed by Jakub Kicinski)
parent 3fff72f827
commit 929324913e


@@ -85,11 +85,8 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
 		WRITE_ONCE(table->ents[index], val);
 }
 
-#endif /* CONFIG_RPS */
-
-static inline void sock_rps_record_flow_hash(__u32 hash)
+static inline void _sock_rps_record_flow_hash(__u32 hash)
 {
-#ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
 
 	if (!hash)
@@ -99,42 +96,33 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 	if (sock_flow_table)
 		rps_record_sock_flow(sock_flow_table, hash);
 	rcu_read_unlock();
-#endif
 }
 
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void _sock_rps_record_flow(const struct sock *sk)
 {
-#ifdef CONFIG_RPS
-	if (static_branch_unlikely(&rfs_needed)) {
-		/* Reading sk->sk_rxhash might incur an expensive cache line
-		 * miss.
-		 *
-		 * TCP_ESTABLISHED does cover almost all states where RFS
-		 * might be useful, and is cheaper [1] than testing :
-		 *	IPv4: inet_sk(sk)->inet_daddr
-		 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
-		 *	OR	an additional socket flag
-		 *	[1] : sk_state and sk_prot are in the same cache line.
+	/* Reading sk->sk_rxhash might incur an expensive cache line
+	 * miss.
+	 *
+	 * TCP_ESTABLISHED does cover almost all states where RFS
+	 * might be useful, and is cheaper [1] than testing :
+	 *	IPv4: inet_sk(sk)->inet_daddr
+	 *	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+	 *	OR	an additional socket flag
+	 *	[1] : sk_state and sk_prot are in the same cache line.
+	 */
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		/* This READ_ONCE() is paired with the WRITE_ONCE()
+		 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
 		 */
-		if (sk->sk_state == TCP_ESTABLISHED) {
-			/* This READ_ONCE() is paired with the WRITE_ONCE()
-			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
-			 */
-			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
-		}
+		_sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
 	}
-#endif
 }
 
-static inline void sock_rps_delete_flow(const struct sock *sk)
+static inline void _sock_rps_delete_flow(const struct sock *sk)
 {
-#ifdef CONFIG_RPS
 	struct rps_sock_flow_table *table;
 	u32 hash, index;
 
-	if (!static_branch_unlikely(&rfs_needed))
-		return;
-
 	hash = READ_ONCE(sk->sk_rxhash);
 	if (!hash)
 		return;
@@ -147,6 +135,45 @@ static inline void sock_rps_delete_flow(const struct sock *sk)
 			WRITE_ONCE(table->ents[index], RPS_NO_CPU);
 	}
 	rcu_read_unlock();
-#endif
 }
+#endif /* CONFIG_RPS */
+
+static inline bool rfs_is_needed(void)
+{
+#ifdef CONFIG_RPS
+	return static_branch_unlikely(&rfs_needed);
+#else
+	return false;
+#endif
+}
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+	if (!rfs_is_needed())
+		return;
+
+	_sock_rps_record_flow_hash(hash);
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	if (!rfs_is_needed())
+		return;
+
+	_sock_rps_record_flow(sk);
+#endif
+}
+
+static inline void sock_rps_delete_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	if (!rfs_is_needed())
+		return;
+
+	_sock_rps_delete_flow(sk);
+#endif
+}
 