Currently, interrupts are automatically enabled immediately upon request.
This allows an interrupt to fire before the associated NAPI context is
fully initialized and cause failures like the one below:

[    0.946369] Call Trace:
[    0.946369]  <IRQ>
[    0.946369]  __napi_poll+0x2a/0x1e0
[    0.946369]  net_rx_action+0x2f9/0x3f0
[    0.946369]  handle_softirqs+0xd6/0x2c0
[    0.946369]  ? handle_edge_irq+0xc1/0x1b0
[    0.946369]  __irq_exit_rcu+0xc3/0xe0
[    0.946369]  common_interrupt+0x81/0xa0
[    0.946369]  </IRQ>
[    0.946369]  <TASK>
[    0.946369]  asm_common_interrupt+0x22/0x40
[    0.946369] RIP: 0010:pv_native_safe_halt+0xb/0x10

Use the `IRQF_NO_AUTOEN` flag when requesting interrupts to prevent
auto-enablement, and explicitly enable the interrupt in the NAPI
initialization path (and disable it during NAPI teardown). This ensures
that the interrupt lifecycle is strictly coupled with the readiness of
the NAPI context.

Cc: stable@vger.kernel.org
Fixes: 1dfc2e46117e ("gve: Refactor napi add and remove functions")
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20251219102945.2193617-1-hramamurthy@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
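For reference, here is a minimal sketch of the request-side half of this
pattern. The actual request_irq() change lives in the driver's interrupt
setup path (gve_main.c), which is not part of the file below; the handler
and device names here (example_intr, example_setup_irq, "example-dev")
are illustrative only.

#include <linux/interrupt.h>

/* Illustrative handler; a real driver would schedule NAPI here. */
static irqreturn_t example_intr(int irq, void *arg)
{
	/* Safe to touch NAPI state from here: the line stays masked
	 * until enable_irq() runs after NAPI initialization.
	 */
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *cookie)
{
	/* IRQF_NO_AUTOEN leaves the IRQ disabled after a successful
	 * request_irq(); it cannot fire until enable_irq() is called
	 * explicitly (see gve_add_napi() in the file below).
	 */
	return request_irq(irq, example_intr, IRQF_NO_AUTOEN,
			   "example-dev", cookie);
}

The enable/disable halves of the pattern appear in gve_add_napi() and
gve_remove_napi() at the end of the file below.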
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	return block->tx != NULL;
}

void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	return block->rx != NULL;
}

void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
				 u8 *data, u16 len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, 0, data, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len)
{
	void *va = page_info->page_address + page_info->page_offset +
		page_info->pad;

	return gve_rx_copy_data(dev, napi, va, len);
}

void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}

void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
		  int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
	netif_napi_set_irq_locked(&block->napi, block->irq);
	/* The IRQ was requested with IRQF_NO_AUTOEN, so it can only fire
	 * once the NAPI context is fully initialized.
	 */
	enable_irq(block->irq);
}

void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	/* Mask the IRQ before the NAPI context it schedules goes away. */
	disable_irq(block->irq);
	netif_napi_del_locked(&block->napi);
}
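Note how gve_add_napi() calls enable_irq() only after netif_napi_add_locked()
and netif_napi_set_irq_locked() have run, while gve_remove_napi() calls
disable_irq() before netif_napi_del_locked(): with the IRQ requested under
IRQF_NO_AUTOEN, the handler can never observe a half-initialized or
half-torn-down NAPI context.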