typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+enum mlx5e_rq_flag {
+	MLX5E_RQ_FLAG_XDP_XMIT,	/* a bit number: __set_bit()/__test_and_clear_bit() take bit indices, not BIT() masks */
+};
+
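/* Aside: a minimal, runnable userspace sketch of the flag pattern above,
 * assuming nothing beyond standard C. Every model_/MODEL_ name is a
 * hypothetical stand-in written for illustration; the kernel's real helpers
 * are the non-atomic __set_bit()/__test_and_clear_bit() from
 * <linux/bitops.h>, and they take bit numbers, which is why the enum uses
 * plain enumerators rather than BIT() masks.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
/* Same sizing idea as DECLARE_BITMAP(flags, 8): round bits up to longs. */
#define BITS_TO_LONGS(nbits)	(((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Stand-in for __set_bit(): nr is a bit NUMBER, not a mask. */
static void model_set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Stand-in for __test_and_clear_bit(): read and clear in one step, so a
 * stale flag cannot leak into the handling of the next completion. */
static bool model_test_and_clear_bit(unsigned int nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	bool old = map[nr / BITS_PER_LONG] & mask;

	map[nr / BITS_PER_LONG] &= ~mask;
	return old;
}

enum model_rq_flag {
	MODEL_RQ_FLAG_XDP_XMIT,	/* bit number 0 */
};

int main(void)
{
	unsigned long flags[BITS_TO_LONGS(8)] = { 0 };

	model_set_bit(MODEL_RQ_FLAG_XDP_XMIT, flags);
	/* The first consumer sees the flag and clears it... */
	printf("%d\n", model_test_and_clear_bit(MODEL_RQ_FLAG_XDP_XMIT, flags)); /* 1 */
	/* ...so the next completion starts with a clean flag. */
	printf("%d\n", model_test_and_clear_bit(MODEL_RQ_FLAG_XDP_XMIT, flags)); /* 0 */
	return 0;
}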
struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll wq;
	struct {
		u32 frag_sz; /* max possible skb frag_sz */
-		union {
-			bool page_reuse;
-			bool xdp_xmit;
-		};
+		bool page_reuse; /* the union collapses to its one remaining member */
	} wqe;
	struct bpf_prog *xdp_prog;
	unsigned int hw_mtu;
	struct mlx5e_xdpsq xdpsq;
+	DECLARE_BITMAP(flags, 8);

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	/* Move the page reference to the sq's responsibility,
	 * and mark it so it is not put back in the page-cache.
	 */
-	rq->wqe.xdp_xmit = true;
+	__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic: set and consumed within the same NAPI poll */
	sq->db.di[pi] = *di;
	sq->pc++;
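/* Aside: a hedged sketch of what "returned on XDP_TX completion" means,
 * using only hypothetical model_ names rather than driver API. Roughly,
 * the page references parked in sq->db.di[] above are released when the
 * driver later polls the XDP SQ's completion queue.
 */
#include <stdio.h>

#define MODEL_RING_SIZE 4

struct model_page { int id; };
struct model_dma_info { struct model_page *page; };

struct model_xdpsq {
	struct model_dma_info db[MODEL_RING_SIZE];
	unsigned int pc;	/* producer counter: descriptors posted */
	unsigned int cc;	/* consumer counter: completions polled */
};

/* Post: mirrors "sq->db.di[pi] = *di; sq->pc++;" in the hunk above. */
static void model_post(struct model_xdpsq *sq, struct model_dma_info *di)
{
	sq->db[sq->pc % MODEL_RING_SIZE] = *di;
	sq->pc++;
}

/* Poll completions: each completed slot gives its page reference back. */
static void model_poll_completions(struct model_xdpsq *sq)
{
	while (sq->cc != sq->pc) {
		struct model_dma_info *di = &sq->db[sq->cc % MODEL_RING_SIZE];

		printf("releasing page %d back to the pool\n", di->page->id);
		di->page = NULL;
		sq->cc++;
	}
}

int main(void)
{
	struct model_page p1 = { .id = 1 }, p2 = { .id = 2 };
	struct model_dma_info d1 = { .page = &p1 }, d2 = { .page = &p2 };
	struct model_xdpsq sq = { 0 };

	model_post(&sq, &d1);
	model_post(&sq, &d2);
	model_poll_completions(&sq);	/* prints both releases */
	return 0;
}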
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
	/* no SKB was built; the packet was probably consumed by XDP */
-	if (rq->wqe.xdp_xmit) {
+	if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
		wi->di.page = NULL;
-		rq->wqe.xdp_xmit = false;
		/* do not return the page to the cache; it is released on XDP_TX completion */
		goto wq_ll_pop;
	}
/* the same conversion applies at the second skb_from_cqe() call site */
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
	/* no SKB was built; the packet was probably consumed by XDP */
-	if (rq->wqe.xdp_xmit) {
+	if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
		wi->di.page = NULL;
-		rq->wqe.xdp_xmit = false;
		/* do not return the page to the cache; it is released on XDP_TX completion */
		goto wq_ll_pop;
	}
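/* Aside: a runnable model of the flag's contract across the hunks above,
 * again with purely hypothetical model_ names. If XDP_TX took the page
 * while this CQE was being handled, the RX side must drop its reference
 * instead of recycling the page, and the test-and-clear ensures the next
 * CQE starts with a clean flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_page { int id; };
struct model_wqe_info { struct model_page *page; };

struct model_rq {
	bool xdp_xmit;	/* stands in for MLX5E_RQ_FLAG_XDP_XMIT in rq->flags */
	int recycled;	/* pages put back in the page-cache */
};

static bool model_test_and_clear(bool *flag)
{
	bool old = *flag;

	*flag = false;
	return old;
}

/* Mirrors the "if (!skb)" branch in the hunks above. */
static void model_handle_no_skb(struct model_rq *rq, struct model_wqe_info *wi)
{
	if (model_test_and_clear(&rq->xdp_xmit)) {
		/* XDP_TX owns the page now; it is freed on TX completion. */
		wi->page = NULL;
		return;
	}
	/* No XDP hand-off: the page can go back to the page-cache. */
	rq->recycled++;
}

int main(void)
{
	struct model_page pg = { .id = 1 };
	struct model_wqe_info wi = { .page = &pg };
	struct model_rq rq = { .xdp_xmit = true };	/* an XDP_TX happened */

	model_handle_no_skb(&rq, &wi);
	printf("page=%p recycled=%d\n", (void *)wi.page, rq.recycled); /* nil, 0 */

	wi.page = &pg;
	model_handle_no_skb(&rq, &wi);	/* flag was cleared: normal recycle */
	printf("page=%p recycled=%d\n", (void *)wi.page, rq.recycled); /* addr, 1 */
	return 0;
}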