net/mlx5e: do not create xdp_redirect for non-uplink rep
authorWilliam Tu <witu@nvidia.com>
Thu, 31 Oct 2024 12:58:56 +0000 (14:58 +0200)
committerJakub Kicinski <kuba@kernel.org>
Sun, 3 Nov 2024 23:37:15 +0000 (15:37 -0800)
XDP and XDP socket require extra SQ/RQ/CQs. Most of these resources
are dynamically created: no XDP program loaded, no resources are
created. One exception is the SQ/CQ created for XDP_REDIRECT, used
by other netdevs to forward packets to mlx5 for transmission. The patch
disables creation of SQ and CQ used for egress XDP_REDIRECT, by
checking whether ndo_xdp_xmit is set or not.

For netdev without XDP support such as non-uplink representor, this
saves around 0.35MB of memory, per representor netdevice per channel.

Signed-off-by: William Tu <witu@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241031125856.530927-6-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

index 2f609b92d29b08a710c7688844735df10bd0f2dc..59d7a0e28f24b7941ebfa0bc4544f71e7d4bb923 100644 (file)
@@ -2514,6 +2514,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
 {
+       const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
        struct dim_cq_moder icocq_moder = {0, 0};
        struct mlx5e_create_cq_param ccp;
        int err;
@@ -2534,10 +2535,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_icosq_cq;
 
-       c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
-       if (IS_ERR(c->xdpsq)) {
-               err = PTR_ERR(c->xdpsq);
-               goto err_close_tx_cqs;
+       if (netdev_ops->ndo_xdp_xmit) {
+               c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+               if (IS_ERR(c->xdpsq)) {
+                       err = PTR_ERR(c->xdpsq);
+                       goto err_close_tx_cqs;
+               }
        }
 
        err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
@@ -2601,7 +2604,8 @@ err_close_rx_cq:
        mlx5e_close_cq(&c->rq.cq);
 
 err_close_xdpredirect_sq:
-       mlx5e_close_xdpredirect_sq(c->xdpsq);
+       if (c->xdpsq)
+               mlx5e_close_xdpredirect_sq(c->xdpsq);
 
 err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);
@@ -2629,7 +2633,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
        if (c->xdp)
                mlx5e_close_cq(&c->rq_xdpsq.cq);
        mlx5e_close_cq(&c->rq.cq);
-       mlx5e_close_xdpredirect_sq(c->xdpsq);
+       if (c->xdpsq)
+               mlx5e_close_xdpredirect_sq(c->xdpsq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
        mlx5e_close_cq(&c->async_icosq.cq);