mlx5e: modify driver for handling offsets
author Kevin Laatz <kevin.laatz@intel.com>
Tue, 27 Aug 2019 02:25:25 +0000 (02:25 +0000)
committer Daniel Borkmann <daniel@iogearbox.net>
Fri, 30 Aug 2019 23:08:26 +0000 (01:08 +0200)
With the addition of the unaligned chunks option, we need to make sure we
handle the offsets correctly based on the mode we are currently running
in. This patch modifies the driver to adjust the address appropriately in
each case: rather than adding the offset to the handle by hand, it now goes
through the xsk_umem_adjust_offset() helper, which encodes the offset
correctly for both aligned and unaligned chunk modes.
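
For context, a minimal sketch of what xsk_umem_adjust_offset() is expected
to do under the two chunk modes. The XDP_UMEM_UNALIGNED_CHUNK_FLAG and
XSK_UNALIGNED_BUF_OFFSET_SHIFT names follow the unaligned-chunks series and
are assumptions here, not part of this diff:

    static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
                                             u64 offset)
    {
            /* Unaligned chunk mode: keep the chunk's base address in the
             * low bits and carry the data offset in the upper bits of the
             * handle.
             */
            if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
                    return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

            /* Aligned mode: the offset is simply folded into the address. */
            return address + offset;
    }

In aligned mode this matches the old behaviour; in unaligned mode the chunk
base address stays intact in the lower bits, so the original address remains
recoverable from the handle.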

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 1ed5c33e022f5d8fbdeff14ba598fe8a1cef9c05..f049e0ac308a00c587a3260aa110e48f4accb445 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -122,6 +122,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      void *va, u16 *rx_headroom, u32 *len, bool xsk)
 {
        struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+       struct xdp_umem *umem = rq->umem;
        struct xdp_buff xdp;
        u32 act;
        int err;
@@ -138,8 +139,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
        xdp.rxq = &rq->xdp_rxq;
 
        act = bpf_prog_run_xdp(prog, &xdp);
-       if (xsk)
-               xdp.handle += xdp.data - xdp.data_hard_start;
+       if (xsk) {
+               u64 off = xdp.data - xdp.data_hard_start;
+
+               xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
+       }
        switch (act) {
        case XDP_PASS:
                *rx_headroom = xdp.data - xdp.data_hard_start;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 6a55573ec8f2964c226e766c7adba007d7c0d717..7c49a66d28c92f2dcb1106318df38a3d0e2019a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -24,7 +24,8 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
        if (!xsk_umem_peek_addr_rq(umem, &handle))
                return -ENOMEM;
 
-       dma_info->xsk.handle = handle + rq->buff.umem_headroom;
+       dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,
+                                                     rq->buff.umem_headroom);
        dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
 
        /* No need to add headroom to the DMA address. In striding RQ case, we
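
On the consuming side, an encoded handle has to be split back into its chunk
base and data offset before it can be resolved to a virtual address (as
xdp_umem_get_data() does above). A rough sketch of the decode helpers this
relies on; the shift and mask values follow the unaligned-chunks uapi
additions and are assumptions here, not part of this patch:

    /* Sketch only: splitting an encoded handle back into the chunk base
     * address and the data offset carried in its upper bits.
     */
    #define XSK_UNALIGNED_BUF_OFFSET_SHIFT  48
    #define XSK_UNALIGNED_BUF_ADDR_MASK \
            ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

    static inline u64 xsk_umem_extract_addr(u64 addr)
    {
            return addr & XSK_UNALIGNED_BUF_ADDR_MASK;      /* chunk base */
    }

    static inline u64 xsk_umem_extract_offset(u64 addr)
    {
            return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;  /* data offset */
    }

    static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
    {
            return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
    }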