net: drop rtnl_lock for queue_mgmt operations

All drivers that use queue API are already converted to use
netdev instance lock. Move netdev instance lock management to
the netlink layer and drop rtnl_lock.

Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250311144026.4154277-4-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Stanislav Fomichev 2025-03-11 07:40:26 -07:00 committed by Jakub Kicinski
parent 10eef096be
commit 1d22d3060b
5 changed files with 18 additions and 23 deletions

View file

@@ -11381,14 +11381,14 @@ static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
return; return;
rtnl_lock(); netdev_lock(irq->bp->dev);
if (netif_running(irq->bp->dev)) { if (netif_running(irq->bp->dev)) {
err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
if (err) if (err)
netdev_err(irq->bp->dev, netdev_err(irq->bp->dev,
"RX queue restart failed: err=%d\n", err); "RX queue restart failed: err=%d\n", err);
} }
rtnl_unlock(); netdev_unlock(irq->bp->dev);
} }
static void bnxt_irq_affinity_release(struct kref *ref) static void bnxt_irq_affinity_release(struct kref *ref)

View file

@@ -787,7 +787,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
if (ret != 2) if (ret != 2)
return -EINVAL; return -EINVAL;
rtnl_lock(); netdev_lock(ns->netdev);
if (queue >= ns->netdev->real_num_rx_queues) { if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL; ret = -EINVAL;
goto exit_unlock; goto exit_unlock;
@@ -801,7 +801,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
ret = count; ret = count;
exit_unlock: exit_unlock:
rtnl_unlock(); netdev_unlock(ns->netdev);
return ret; return ret;
} }

View file

@@ -25,7 +25,6 @@
/* Device memory support */ /* Device memory support */
/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1); static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
static const struct memory_provider_ops dmabuf_devmem_ops; static const struct memory_provider_ops dmabuf_devmem_ops;
@@ -128,9 +127,10 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
rxq->mp_params.mp_priv = NULL; rxq->mp_params.mp_priv = NULL;
rxq->mp_params.mp_ops = NULL; rxq->mp_params.mp_ops = NULL;
netdev_lock(binding->dev);
rxq_idx = get_netdev_rx_queue_index(rxq); rxq_idx = get_netdev_rx_queue_index(rxq);
WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx)); WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
netdev_unlock(binding->dev);
} }
xa_erase(&net_devmem_dmabuf_bindings, binding->id); xa_erase(&net_devmem_dmabuf_bindings, binding->id);

View file

@@ -860,12 +860,11 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
} }
mutex_lock(&priv->lock); mutex_lock(&priv->lock);
rtnl_lock();
netdev = __dev_get_by_index(genl_info_net(info), ifindex); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
if (!netdev || !netif_device_present(netdev)) { if (!netdev || !netif_device_present(netdev)) {
err = -ENODEV; err = -ENODEV;
goto err_unlock; goto err_unlock_sock;
} }
if (dev_xdp_prog_count(netdev)) { if (dev_xdp_prog_count(netdev)) {
@@ -918,7 +917,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
if (err) if (err)
goto err_unbind; goto err_unbind;
rtnl_unlock(); netdev_unlock(netdev);
mutex_unlock(&priv->lock); mutex_unlock(&priv->lock);
return 0; return 0;
@@ -926,7 +926,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
err_unbind: err_unbind:
net_devmem_unbind_dmabuf(binding); net_devmem_unbind_dmabuf(binding);
err_unlock: err_unlock:
rtnl_unlock(); netdev_unlock(netdev);
err_unlock_sock:
mutex_unlock(&priv->lock); mutex_unlock(&priv->lock);
err_genlmsg_free: err_genlmsg_free:
nlmsg_free(rsp); nlmsg_free(rsp);
@@ -946,9 +947,7 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
mutex_lock(&priv->lock); mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) { list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
rtnl_lock();
net_devmem_unbind_dmabuf(binding); net_devmem_unbind_dmabuf(binding);
rtnl_unlock();
} }
mutex_unlock(&priv->lock); mutex_unlock(&priv->lock);
} }

View file

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later // SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h> #include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h> #include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h> #include <net/page_pool/memory_provider.h>
@@ -18,7 +19,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
!qops->ndo_queue_mem_alloc || !qops->ndo_queue_start) !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
return -EOPNOTSUPP; return -EOPNOTSUPP;
ASSERT_RTNL(); netdev_assert_locked(dev);
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL); new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
if (!new_mem) if (!new_mem)
@@ -30,8 +31,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
goto err_free_new_mem; goto err_free_new_mem;
} }
netdev_lock(dev);
err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx); err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
if (err) if (err)
goto err_free_old_mem; goto err_free_old_mem;
@@ -54,8 +53,6 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
qops->ndo_queue_mem_free(dev, old_mem); qops->ndo_queue_mem_free(dev, old_mem);
netdev_unlock(dev);
kvfree(old_mem); kvfree(old_mem);
kvfree(new_mem); kvfree(new_mem);
@@ -80,7 +77,6 @@ err_free_new_queue_mem:
qops->ndo_queue_mem_free(dev, new_mem); qops->ndo_queue_mem_free(dev, new_mem);
err_free_old_mem: err_free_old_mem:
netdev_unlock(dev);
kvfree(old_mem); kvfree(old_mem);
err_free_new_mem: err_free_new_mem:
@@ -118,9 +114,9 @@ int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
{ {
int ret; int ret;
rtnl_lock(); netdev_lock(dev);
ret = __net_mp_open_rxq(dev, ifq_idx, p); ret = __net_mp_open_rxq(dev, ifq_idx, p);
rtnl_unlock(); netdev_unlock(dev);
return ret; return ret;
} }
@@ -153,7 +149,7 @@ static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
struct pp_memory_provider_params *old_p) struct pp_memory_provider_params *old_p)
{ {
rtnl_lock(); netdev_lock(dev);
__net_mp_close_rxq(dev, ifq_idx, old_p); __net_mp_close_rxq(dev, ifq_idx, old_p);
rtnl_unlock(); netdev_unlock(dev);
} }