if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
return;
- rtnl_lock();
+ netdev_lock(irq->bp->dev);
if (netif_running(irq->bp->dev)) {
err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
if (err)
netdev_err(irq->bp->dev,
"RX queue restart failed: err=%d\n", err);
}
- rtnl_unlock();
+ netdev_unlock(irq->bp->dev);
}
static void bnxt_irq_affinity_release(struct kref *ref)
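
For reference: the netdev_lock()/netdev_unlock() calls these hunks switch to take the per-device instance lock, a mutex embedded in struct net_device. A rough paraphrase of the helpers from include/net/netdev_lock.h (see the header for the authoritative definitions):

	/* Paraphrased sketch of the instance-lock helpers; dev->lock is
	 * a struct mutex member of struct net_device. */
	static inline void netdev_lock(struct net_device *dev)
	{
		mutex_lock(&dev->lock);
	}

	static inline void netdev_unlock(struct net_device *dev)
	{
		mutex_unlock(&dev->lock);
	}

	static inline void netdev_assert_locked(struct net_device *dev)
	{
		lockdep_assert_held(&dev->lock);
	}

Unlike rtnl_lock(), this serializes control-path work against this one device only, which is the point of the conversion.
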
if (ret != 2)
return -EINVAL;
- rtnl_lock();
+ netdev_lock(ns->netdev);
if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL;
		goto exit_unlock;
	}

	ret = count;
exit_unlock:
- rtnl_unlock();
+ netdev_unlock(ns->netdev);
return ret;
}
/* Device memory support */
-/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
static const struct memory_provider_ops dmabuf_devmem_ops;
rxq->mp_params.mp_priv = NULL;
rxq->mp_params.mp_ops = NULL;
+ netdev_lock(binding->dev);
rxq_idx = get_netdev_rx_queue_index(rxq);
-
WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
+ netdev_unlock(binding->dev);
}
xa_erase(&net_devmem_dmabuf_bindings, binding->id);
}
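
Dropping the "Protected by rtnl_lock()" comment above is consistent with XArray semantics: plain xa_*() calls take the array's internal spinlock, so the xa_erase() here needs no outer lock for the array itself. Illustration only, with made-up names:

	/* Hypothetical example: xa_alloc()/xa_erase() serialize on the
	 * xarray's own xa_lock, no rtnl or instance lock required. */
	static DEFINE_XARRAY_FLAGS(example_bindings, XA_FLAGS_ALLOC1);

	static void example_drop(u32 id)
	{
		xa_erase(&example_bindings, id);
	}
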
mutex_lock(&priv->lock);
- rtnl_lock();
- netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
if (!netdev || !netif_device_present(netdev)) {
err = -ENODEV;
- goto err_unlock;
+ goto err_unlock_sock;
}
if (dev_xdp_prog_count(netdev)) {
if (err)
goto err_unbind;
- rtnl_unlock();
+ netdev_unlock(netdev);
+
mutex_unlock(&priv->lock);
return 0;
err_unbind:
net_devmem_unbind_dmabuf(binding);
err_unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
+err_unlock_sock:
mutex_unlock(&priv->lock);
err_genlmsg_free:
nlmsg_free(rsp);
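
The split into err_unlock and err_unlock_sock follows from the lookup helper's contract: netdev_get_by_index_lock() returns the device with its instance lock already held, or NULL, so a failed lookup leaves only the socket mutex to drop. A sketch of the pattern under that assumption (example_op is hypothetical, not the actual genl handler):

	static int example_op(struct net *net, int ifindex)
	{
		struct net_device *netdev;

		netdev = netdev_get_by_index_lock(net, ifindex);
		if (!netdev)
			return -ENODEV;	/* nothing locked on failure */

		/* ... device work, instance lock held ... */

		netdev_unlock(netdev);	/* pairs with the lock from the lookup */
		return 0;
	}
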
mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
- rtnl_lock();
net_devmem_unbind_dmabuf(binding);
- rtnl_unlock();
}
mutex_unlock(&priv->lock);
}
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/netdevice.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>
	if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free ||
	    !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
return -EOPNOTSUPP;
- ASSERT_RTNL();
+ netdev_assert_locked(dev);
new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem)
		return -ENOMEM;

	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!old_mem) {
		err = -ENOMEM;
		goto err_free_new_mem;
	}
- netdev_lock(dev);
-
err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
if (err)
goto err_free_old_mem;
qops->ndo_queue_mem_free(dev, old_mem);
- netdev_unlock(dev);
-
kvfree(old_mem);
kvfree(new_mem);
qops->ndo_queue_mem_free(dev, new_mem);
err_free_old_mem:
- netdev_unlock(dev);
kvfree(old_mem);
err_free_new_mem:
	kvfree(new_mem);
	return err;
}
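
netdev_rx_queue_restart() thus changes from taking the instance lock itself to asserting that the caller holds it, which is what lets the bnxt and netdevsim hunks above wrap the call directly. The resulting caller-side pattern, in sketch form:

	/* Caller-side pattern after this change (error handling elided). */
	netdev_lock(dev);
	err = netdev_rx_queue_restart(dev, rxq_idx);
	netdev_unlock(dev);
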
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
		    struct pp_memory_provider_params *p)
{
	int ret;
- rtnl_lock();
+ netdev_lock(dev);
ret = __net_mp_open_rxq(dev, ifq_idx, p);
- rtnl_unlock();
+ netdev_unlock(dev);
return ret;
}
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
struct pp_memory_provider_params *old_p)
{
- rtnl_lock();
+ netdev_lock(dev);
__net_mp_close_rxq(dev, ifq_idx, old_p);
- rtnl_unlock();
+ netdev_unlock(dev);
}
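
A memory provider sits on top of these wrappers and never touches the lock itself (io_uring zero-copy RX being an in-tree user). Hypothetical sketch; my_mp_ops, my_state, and example_provider_cycle are placeholders, not real symbols:

	static int example_provider_cycle(struct net_device *dev, unsigned rxq_idx)
	{
		struct pp_memory_provider_params p = {
			.mp_ops	 = &my_mp_ops,	/* placeholder ops table */
			.mp_priv = my_state,	/* placeholder provider state */
		};
		int err;

		err = net_mp_open_rxq(dev, rxq_idx, &p);  /* takes and drops dev's lock */
		if (err)
			return err;

		/* ... provider now serves the queue ... */

		net_mp_close_rxq(dev, rxq_idx, &p);	  /* ditto, on teardown */
		return 0;
	}
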