net/mlx5e: Unindent the else-block in mlx5e_xmit_xdp_buff
author Maxim Mikityanskiy <maximmi@nvidia.com>
Thu, 10 Mar 2022 16:16:17 +0000 (18:16 +0200)
committer Saeed Mahameed <saeedm@nvidia.com>
Fri, 18 Mar 2022 20:51:14 +0000 (13:51 -0700)
The next commit will add more indentation levels to mlx5e_xmit_xdp_buff.
To keep indentation minimal, unindent the else-block of the if-statement
by doing an early return instead; the general pattern is sketched below,
after the sign-offs.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
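
The transformation is the classic early-return refactor. A generic,
hypothetical sketch of it follows; setup_frame, setup_page, and submit
are stand-ins for the driver's field setup and the
INDIRECT_CALL_2(sq->xmit_xdp_frame, ...) submission, not real driver
functions:

/* Hypothetical stand-ins, for illustration only. */
static void setup_frame(void) { }
static void setup_page(void) { }
static bool submit(void) { return true; }

/* Before: both modes converge on shared submission code at the end,
 * so the page-mode logic sits one level deep in an else-block.
 */
static bool xmit_before(bool frame_mode)
{
	if (frame_mode) {
		setup_frame();
	} else {
		setup_page();
	}
	return submit();
}

/* After: the frame-mode branch submits and returns early, so the
 * former else-block drops to the top indentation level and the next
 * commit can nest new logic in the branch without going deeper.
 */
static bool xmit_after(bool frame_mode)
{
	if (frame_mode) {
		setup_frame();
		return submit();
	}
	setup_page();
	return submit();
}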
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

index 52e0f0028c35a9ad618ef732531c9ca5d23acc52..368e5494961493df565ad3286ddc775a878cb58f 100644 (file)
@@ -101,24 +101,30 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                xdptxd.dma_addr     = dma_addr;
                xdpi.frame.xdpf     = xdpf;
                xdpi.frame.dma_addr = dma_addr;
-       } else {
-               /* Driver assumes that xdp_convert_buff_to_frame returns
-                * an xdp_frame that points to the same memory region as
-                * the original xdp_buff. It allows to map the memory only
-                * once and to use the DMA_BIDIRECTIONAL mode.
-                */
-
-               xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
 
-               dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-               dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
-                                          DMA_TO_DEVICE);
+               if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                             mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+                       return false;
 
-               xdptxd.dma_addr = dma_addr;
-               xdpi.page.rq    = rq;
-               xdpi.page.page = page;
+               mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+               return true;
        }
 
+       /* Driver assumes that xdp_convert_buff_to_frame returns an xdp_frame
+        * that points to the same memory region as the original xdp_buff. It
+        * allows to map the memory only once and to use the DMA_BIDIRECTIONAL
+        * mode.
+        */
+
+       xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+
+       dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
+       dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+
+       xdptxd.dma_addr = dma_addr;
+       xdpi.page.rq = rq;
+       xdpi.page.page = page;
+
        if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
                return false;
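
For context on the comment in the page path: the RX page is mapped once
with DMA_BIDIRECTIONAL when the buffer is set up, so XDP_TX only needs a
cache sync rather than a fresh mapping. A minimal sketch of that idea
against the generic DMA API (hypothetical helpers, not code from this
driver):

#include <linux/dma-mapping.h>

/* RX setup (hypothetical): map the page once, in both directions, so
 * the same mapping serves device writes on RX and device reads on TX.
 */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
}

/* XDP_TX (hypothetical): no new mapping is needed; just make the
 * frame's bytes visible to the device before posting the descriptor,
 * as the hunk above does with dma_sync_single_for_device().
 */
static void tx_sync_frame(struct device *dev, dma_addr_t base,
			  unsigned int offset, unsigned int len)
{
	dma_sync_single_for_device(dev, base + offset, len, DMA_TO_DEVICE);
}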