net: hold netdev instance lock during queue operations

For drivers that use the queue management API, switch to the mode where
the core stack holds the netdev instance lock. This affects the
following drivers:
- bnxt
- gve
- netdevsim

Originally I locked only start/stop, but switched to holding the
lock over all queue iterations so that they look atomic to the device
(which should make the behavior easier to reason about).
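
The resulting split, roughly, is sketched below (a simplified
illustration, not the literal diff: the core brackets the whole
sequence with the instance lock, and the per-queue work happens
inside it):

/* Simplified sketch of the locking shape this patch establishes:
 * the core holds the instance lock across all queue operations, so
 * the device cannot observe an intermediate state between iterations.
 */
static int example_restart_all_queues(struct net_device *dev)
{
	unsigned int i;

	netdev_lock(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		/* stop/start queue i via dev->queue_mgmt_ops */
	}
	netdev_unlock(dev);

	return 0;
}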

Reviewed-by: Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeed@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250305163732.2766420-6-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 6 files changed, 41 insertions(+), 25 deletions(-)

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -11508,7 +11508,7 @@ static int bnxt_request_irq(struct bnxt *bp)
 		if (rc)
 			break;
-		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+		netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
 		irq->requested = 1;
 
 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -11557,9 +11557,9 @@ static void bnxt_del_napi(struct bnxt *bp)
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 
-		__netif_napi_del(&bnapi->napi);
+		__netif_napi_del_locked(&bnapi->napi);
 	}
 
-	/* We called __netif_napi_del(), we need
+	/* We called __netif_napi_del_locked(), we need
 	 * to respect an RCU grace period before freeing napi structures.
 	 */
 	synchronize_net();
@@ -11578,12 +11578,12 @@ static void bnxt_init_napi(struct bnxt *bp)
 		cp_nr_rings--;
 	for (i = 0; i < cp_nr_rings; i++) {
 		bnapi = bp->bnapi[i];
-		netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
-				      bnapi->index);
+		netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+					     bnapi->index);
 	}
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 		bnapi = bp->bnapi[cp_nr_rings];
-		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+		netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
 	}
 }
@@ -11604,7 +11604,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
 			cpr->sw_stats->tx.tx_resets++;
 		if (bnapi->in_reset)
 			cpr->sw_stats->rx.rx_resets++;
-		napi_disable(&bnapi->napi);
+		napi_disable_locked(&bnapi->napi);
 	}
 }
@@ -11626,7 +11626,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 		}
-		napi_enable(&bnapi->napi);
+		napi_enable_locked(&bnapi->napi);
 	}
 }
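
The bnxt conversion is mechanical: every NAPI helper in paths now
entered under the instance lock becomes its _locked variant, which
expects netdev->lock to already be held instead of taking it itself.
A minimal sketch of that contract (the function name is illustrative,
not from the patch):

/* Called by the core with the instance lock held; netdev_assert_locked()
 * documents (and, with lockdep, enforces) the expectation.
 */
static void example_enable_one_napi(struct net_device *dev,
				    struct napi_struct *napi)
{
	netdev_assert_locked(dev);	/* core took netdev_lock() for us */
	napi_enable_locked(napi);
}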

--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c

@@ -1968,7 +1968,7 @@ static void gve_turndown(struct gve_priv *priv)
 			netif_queue_set_napi(priv->dev, idx,
 					     NETDEV_QUEUE_TYPE_TX, NULL);
-		napi_disable(&block->napi);
+		napi_disable_locked(&block->napi);
 	}
 	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
 		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1979,7 +1979,7 @@ static void gve_turndown(struct gve_priv *priv)
 			netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
 					     NULL);
-		napi_disable(&block->napi);
+		napi_disable_locked(&block->napi);
 	}
 
 	/* Stop tx queues */
@@ -2009,7 +2009,7 @@ static void gve_turnup(struct gve_priv *priv)
 		if (!gve_tx_was_added_to_block(priv, idx))
 			continue;
 
-		napi_enable(&block->napi);
+		napi_enable_locked(&block->napi);
 
 		if (idx < priv->tx_cfg.num_queues)
 			netif_queue_set_napi(priv->dev, idx,
@@ -2037,7 +2037,7 @@ static void gve_turnup(struct gve_priv *priv)
 		if (!gve_rx_was_added_to_block(priv, idx))
 			continue;
 
-		napi_enable(&block->napi);
+		napi_enable_locked(&block->napi);
 		netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
 				     &block->napi);
@@ -2887,6 +2887,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
 	priv->suspend_cnt++;
 
 	rtnl_lock();
+	netdev_lock(netdev);
 	if (was_up && gve_close(priv->dev)) {
 		/* If the dev was up, attempt to close, if close fails, reset */
 		gve_reset_and_teardown(priv, was_up);
@@ -2895,6 +2896,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
 		gve_teardown_priv_resources(priv);
 	}
 	priv->up_before_suspend = was_up;
+	netdev_unlock(netdev);
 	rtnl_unlock();
 	return 0;
 }
@@ -2907,7 +2909,9 @@ static int gve_resume(struct pci_dev *pdev)
 	priv->resume_cnt++;
 
 	rtnl_lock();
+	netdev_lock(netdev);
 	err = gve_reset_recovery(priv, priv->up_before_suspend);
+	netdev_unlock(netdev);
 	rtnl_unlock();
 	return err;
 }
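
Note the lock ordering the suspend/resume paths establish: the netdev
instance lock always nests inside RTNL, never the other way around.
A condensed sketch of the pattern (the function name is illustrative):

static int example_reconfigure(struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	netdev_lock(netdev);	/* instance lock nests inside RTNL */
	/* close/open or reset recovery goes here */
	netdev_unlock(netdev);
	rtnl_unlock();

	return err;
}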

--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c

@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
 {
 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
-	netif_napi_add(priv->dev, &block->napi, gve_poll);
-	netif_napi_set_irq(&block->napi, block->irq);
+	netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+	netif_napi_set_irq_locked(&block->napi, block->irq);
 }
 
 void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
 {
 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
-	netif_napi_del(&block->napi);
+	netif_napi_del_locked(&block->napi);
 }

--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c

@@ -683,7 +683,8 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
 		goto err_free;
 
 	if (!ns->rq_reset_mode)
-		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+		netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+					     idx);
 
 	return 0;
@@ -700,7 +701,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
 		page_pool_destroy(qmem->pp);
 	if (qmem->rq) {
 		if (!ns->rq_reset_mode)
-			netif_napi_del(&qmem->rq->napi);
+			netif_napi_del_locked(&qmem->rq->napi);
 		page_pool_destroy(qmem->rq->page_pool);
 		nsim_queue_free(qmem->rq);
 	}
@@ -712,9 +713,11 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
 	struct nsim_queue_mem *qmem = per_queue_mem;
 	struct netdevsim *ns = netdev_priv(dev);
 
+	netdev_assert_locked(dev);
+
 	if (ns->rq_reset_mode == 1) {
 		ns->rq[idx]->page_pool = qmem->pp;
-		napi_enable(&ns->rq[idx]->napi);
+		napi_enable_locked(&ns->rq[idx]->napi);
 		return 0;
 	}
@@ -722,15 +725,17 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
 	 * here we want to test various call orders.
 	 */
 	if (ns->rq_reset_mode == 2) {
-		netif_napi_del(&ns->rq[idx]->napi);
-		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+		netif_napi_del_locked(&ns->rq[idx]->napi);
+		netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+					     idx);
 	} else if (ns->rq_reset_mode == 3) {
-		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
-		netif_napi_del(&ns->rq[idx]->napi);
+		netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+					     idx);
+		netif_napi_del_locked(&ns->rq[idx]->napi);
 	}
 
 	ns->rq[idx] = qmem->rq;
-	napi_enable(&ns->rq[idx]->napi);
+	napi_enable_locked(&ns->rq[idx]->napi);
 
 	return 0;
 }
@@ -740,7 +745,9 @@ static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
 	struct nsim_queue_mem *qmem = per_queue_mem;
 	struct netdevsim *ns = netdev_priv(dev);
 
-	napi_disable(&ns->rq[idx]->napi);
+	netdev_assert_locked(dev);
+
+	napi_disable_locked(&ns->rq[idx]->napi);
 
 	if (ns->rq_reset_mode == 1) {
 		qmem->pp = ns->rq[idx]->page_pool;

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h

@@ -2755,7 +2755,7 @@ static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
 
 static inline bool netdev_need_ops_lock(struct net_device *dev)
 {
-	bool ret = false;
+	bool ret = !!dev->queue_mgmt_ops;
 
 #if IS_ENABLED(CONFIG_NET_SHAPER)
 	ret |= !!dev->netdev_ops->net_shaper_ops;
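
With this change, netdev_need_ops_lock() reports true for any device
that registers queue management ops, not only CONFIG_NET_SHAPER users.
A minimal sketch of how a caller might key off it (the wrapper below
is illustrative, not part of this patch):

static inline void example_lock_if_needed(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))	/* queue_mgmt_ops or shaper ops */
		netdev_lock(dev);
}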

--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c

@@ -30,6 +30,8 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 		goto err_free_new_mem;
 	}
 
+	netdev_lock(dev);
+
 	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
 	if (err)
 		goto err_free_old_mem;
@@ -52,6 +54,8 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
 	qops->ndo_queue_mem_free(dev, old_mem);
 
+	netdev_unlock(dev);
+
 	kvfree(old_mem);
 	kvfree(new_mem);
@@ -76,6 +80,7 @@ err_free_new_queue_mem:
 	qops->ndo_queue_mem_free(dev, new_mem);
 
 err_free_old_mem:
+	netdev_unlock(dev);
 	kvfree(old_mem);
 
 err_free_new_mem:
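
Taken together, netdev_rx_queue_restart() now holds the instance lock
from queue memory allocation through stop/start and release, and the
err_free_old_mem error path drops it on the way out, so every exit from
the locked region has a matching unlock. A condensed sketch of the
resulting control flow (simplified from the patched function; most of
the memory and queue handling is elided):

static int example_rx_queue_restart(struct net_device *dev, void *new_mem,
				    void *old_mem, unsigned int rxq_idx)
{
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	int err;

	netdev_lock(dev);	/* stop/start look atomic to the device */

	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
	if (err)
		goto err_unlock;

	/* ... ndo_queue_stop(), ndo_queue_start(), free old memory ... */

	netdev_unlock(dev);
	return 0;

err_unlock:
	netdev_unlock(dev);
	return err;
}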