
net: stmmac: fix the crash issue for zero copy XDP_TX action

There is a crash when running the zero-copy XDP_TX action; the crash
log is shown below.

[  216.122464] Unable to handle kernel paging request at virtual address fffeffff80000000
[  216.187524] Internal error: Oops: 0000000096000144 [#1]  SMP
[  216.301694] Call trace:
[  216.304130]  dcache_clean_poc+0x20/0x38 (P)
[  216.308308]  __dma_sync_single_for_device+0x1bc/0x1e0
[  216.313351]  stmmac_xdp_xmit_xdpf+0x354/0x400
[  216.317701]  __stmmac_xdp_run_prog+0x164/0x368
[  216.322139]  stmmac_napi_poll_rxtx+0xba8/0xf00
[  216.326576]  __napi_poll+0x40/0x218
[  216.408054] Kernel panic - not syncing: Oops: Fatal exception in interrupt

For XDP_TX action, the xdp_buff is converted to xdp_frame by
xdp_convert_buff_to_frame(). The memory type of the resulting xdp_frame
depends on the memory type of the xdp_buff. For page pool based xdp_buff
it produces xdp_frame with memory type MEM_TYPE_PAGE_POOL. For zero copy
XSK pool based xdp_buff it produces xdp_frame with memory type
MEM_TYPE_PAGE_ORDER0. However, stmmac_xdp_xmit_back() does not check the
memory type and always uses the page pool type, this leads to invalid
mappings and causes the crash. Therefore, check the xdp_buff memory type
in stmmac_xdp_xmit_back() to fix this issue.
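
For illustration, the pattern the fix follows can be sketched as below.
This is a minimal, hypothetical helper (example_xmit_back() is not part
of the driver); stmmac_priv, stmmac_xdp_xmit_xdpf() and
STMMAC_XDP_CONSUMED come from the patch context, while xdp_buff,
xdp_convert_buff_to_frame() and MEM_TYPE_XSK_BUFF_POOL are the standard
kernel XDP API:

#include <net/xdp.h>

/* Hypothetical helper showing the memory type check: decide whether
 * the converted frame must be freshly DMA-mapped (zero-copy XSK case)
 * or can reuse the page pool mapping.
 */
static int example_xmit_back(struct stmmac_priv *priv, int queue,
			     struct xdp_buff *xdp)
{
	/* True when the buffer comes from an XSK buffer pool */
	bool zc = xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL;
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	/* A zero-copy frame is backed by an order-0 page, so ask the
	 * TX path to dma_map it instead of using the page pool address.
	 */
	return stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
}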

Fixes: bba2556efad6 ("net: stmmac: Enable RX via AF_XDP zero-copy")
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Reviewed-by: Hariprasad Kelam <hkelam@marvell.com>
Link: https://patch.msgid.link/20251204071332.1907111-1-wei.fang@nxp.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

@@ -89,6 +89,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define STMMAC_XDP_CONSUMED BIT(0)
 #define STMMAC_XDP_TX BIT(1)
 #define STMMAC_XDP_REDIRECT BIT(2)
+#define STMMAC_XSK_CONSUMED BIT(3)
 
 static int flow_ctrl = 0xdead;
 module_param(flow_ctrl, int, 0644);
@@ -5126,6 +5127,7 @@ static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
 				struct xdp_buff *xdp)
 {
+	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 	int cpu = smp_processor_id();
 	struct netdev_queue *nq;
@@ -5142,9 +5144,18 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
 	/* Avoids TX time-out as we are sharing with slow path */
 	txq_trans_cond_update(nq);
 
-	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
-	if (res == STMMAC_XDP_TX)
+	/* For zero copy XDP_TX action, dma_map is true */
+	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
+	if (res == STMMAC_XDP_TX) {
 		stmmac_flush_tx_descriptors(priv, queue);
+	} else if (res == STMMAC_XDP_CONSUMED && zc) {
+		/* xdp has been freed by xdp_convert_buff_to_frame(),
+		 * no need to call xsk_buff_free() again, so return
+		 * STMMAC_XSK_CONSUMED.
+		 */
+		res = STMMAC_XSK_CONSUMED;
+		xdp_return_frame(xdpf);
+	}
 
 	__netif_tx_unlock(nq);
@@ -5494,6 +5505,8 @@ read_again:
 			break;
 		case STMMAC_XDP_CONSUMED:
 			xsk_buff_free(buf->xdp);
+			fallthrough;
+		case STMMAC_XSK_CONSUMED:
 			rx_dropped++;
 			break;
 		case STMMAC_XDP_TX:
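
For background on why the dma_map flag matters (the oops above fires in
the DMA sync for the TX buffer), the two mapping paths can be sketched
roughly as below. This is not the driver's exact code:
example_map_xdpf() and the offset arithmetic are illustrative
assumptions; the DMA and page pool helpers are the standard kernel API.

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Rough sketch of the two mapping paths selected by dma_map. A page
 * pool frame already carries a DMA mapping that can be looked up; the
 * order-0 page backing a converted zero-copy frame was never mapped
 * and must be mapped here, otherwise the later DMA sync touches a
 * bogus address.
 */
static int example_map_xdpf(struct device *dev, struct xdp_frame *xdpf,
			    bool dma_map, dma_addr_t *dma_addr)
{
	if (dma_map) {
		/* Zero-copy XDP_TX: create a fresh mapping */
		*dma_addr = dma_map_single(dev, xdpf->data, xdpf->len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma_addr))
			return -ENOMEM;
	} else {
		/* Page pool frame: reuse the pool's existing mapping
		 * (the offset shown here is illustrative)
		 */
		struct page *page = virt_to_page(xdpf->data);

		*dma_addr = page_pool_get_dma_addr(page) +
			    sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(dev, *dma_addr, xdpf->len,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}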