Revert "mlx5 updates 2023-12-20"
[linux-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en / xsk / pool.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
3
4 #include <net/xdp_sock_drv.h>
5 #include "pool.h"
6 #include "setup.h"
7 #include "en/params.h"
8
9 static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
10                               struct xsk_buff_pool *pool)
11 {
12         struct device *dev = mlx5_core_dma_dev(priv->mdev);
13
14         return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
15 }
16
17 static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
18                                  struct xsk_buff_pool *pool)
19 {
20         return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
21 }
22
23 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
24 {
25         if (!xsk->pools) {
26                 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
27                                      sizeof(*xsk->pools), GFP_KERNEL);
28                 if (unlikely(!xsk->pools))
29                         return -ENOMEM;
30         }
31
32         xsk->refcnt++;
33         xsk->ever_used = true;
34
35         return 0;
36 }
37
38 static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
39 {
40         if (!--xsk->refcnt) {
41                 kfree(xsk->pools);
42                 xsk->pools = NULL;
43         }
44 }
45
46 static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
47 {
48         int err;
49
50         err = mlx5e_xsk_get_pools(xsk);
51         if (unlikely(err))
52                 return err;
53
54         xsk->pools[ix] = pool;
55         return 0;
56 }
57
58 static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
59 {
60         xsk->pools[ix] = NULL;
61
62         mlx5e_xsk_put_pools(xsk);
63 }
64
65 static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
66 {
67         return xsk_pool_get_headroom(pool) <= 0xffff &&
68                 xsk_pool_get_chunk_size(pool) <= 0xffff;
69 }
70
71 void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
72 {
73         xsk->headroom = xsk_pool_get_headroom(pool);
74         xsk->chunk_size = xsk_pool_get_chunk_size(pool);
75         xsk->unaligned = pool->unaligned;
76 }
77
/* Attach an XSK pool to channel @ix and, if the channel is live, switch it
 * over to XSK datapath objects. Called with priv->state_lock held (see
 * mlx5e_xsk_enable_pool). Returns 0, -EBUSY if the slot is taken, -EINVAL
 * on bad pool parameters, or an error from mapping/opening.
 */
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
                                   struct xsk_buff_pool *pool, u16 ix)
{
        struct mlx5e_params *params = &priv->channels.params;
        struct mlx5e_xsk_param xsk;
        struct mlx5e_channel *c;
        int err;

        /* Only one pool per channel index. */
        if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
                return -EBUSY;

        /* Headroom/chunk size must fit the driver's 16-bit fields. */
        if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
                return -EINVAL;

        /* DMA-map the pool buffers before registering the pool. */
        err = mlx5e_xsk_map_pool(priv, pool);
        if (unlikely(err))
                return err;

        err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
        if (unlikely(err))
                goto err_unmap_pool;

        mlx5e_build_xsk_param(pool, &xsk);

        /* Striding RQ with the oversized UMR mode works but is slow for
         * this frame size; warn and suggest a remedy rather than fail.
         */
        if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
            mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
                const char *recommendation = is_power_of_2(xsk.chunk_size) ?
                        "Upgrade firmware" : "Disable striding RQ";

                mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
                               xsk.chunk_size, recommendation);
        }

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                /* XSK objects will be created on open. */
                goto validate_closed;
        }

        if (!params->xdp_prog) {
                /* XSK objects will be created when an XDP program is set,
                 * and the channels are reopened.
                 */
                goto validate_closed;
        }

        c = priv->channels.c[ix];

        /* Channel is live with an XDP program: create the XSK queues now
         * and flip the channel over to them. The order below (open ->
         * activate -> kick ICOSQ -> redirect RSS -> quiesce regular RQ)
         * must be preserved.
         */
        err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
        if (unlikely(err))
                goto err_remove_pool;

        mlx5e_activate_xsk(c);
        mlx5e_trigger_napi_icosq(c);

        /* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
         * any Fill Ring entries at the setup stage.
         */

        /* Point RX steering for this channel at the XSK RQ. */
        mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);

        /* Quiesce the regular RQ now that traffic flows through XSK. */
        mlx5e_deactivate_rq(&c->rq);
        mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);

        return 0;

err_remove_pool:
        mlx5e_xsk_remove_pool(&priv->xsk, ix);

err_unmap_pool:
        mlx5e_xsk_unmap_pool(priv, pool);

        return err;

validate_closed:
        /* Check the configuration in advance, rather than fail at a later stage
         * (in mlx5e_xdp_set or on open) and end up with no channels.
         */
        if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
                err = -EINVAL;
                goto err_remove_pool;
        }

        return 0;
}
162
/* Detach the XSK pool from channel @ix, switching the channel back to its
 * regular RQ if it is live. Mirror of mlx5e_xsk_enable_locked; called with
 * priv->state_lock held. Returns 0, or -EINVAL if no pool is attached.
 */
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
        struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
                                                   &priv->xsk, ix);
        struct mlx5e_channel *c;

        if (unlikely(!pool))
                return -EINVAL;

        /* Channels are closed: no datapath objects exist, just drop the pool. */
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto remove_pool;

        /* XSK RQ and SQ are only created if XDP program is set. */
        if (!priv->channels.params.xdp_prog)
                goto remove_pool;

        c = priv->channels.c[ix];

        /* Reverse order of the enable path: bring the regular RQ back up,
         * wait for it to have RX descriptors, redirect RSS away from XSK,
         * then tear the XSK queues down.
         */
        mlx5e_activate_rq(&c->rq);
        mlx5e_trigger_napi_icosq(c);
        mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);

        mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);

        mlx5e_deactivate_xsk(c);
        mlx5e_close_xsk(c);

remove_pool:
        mlx5e_xsk_remove_pool(&priv->xsk, ix);
        mlx5e_xsk_unmap_pool(priv, pool);

        return 0;
}
196
197 static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
198                                  u16 ix)
199 {
200         int err;
201
202         mutex_lock(&priv->state_lock);
203         err = mlx5e_xsk_enable_locked(priv, pool, ix);
204         mutex_unlock(&priv->state_lock);
205
206         return err;
207 }
208
209 static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
210 {
211         int err;
212
213         mutex_lock(&priv->state_lock);
214         err = mlx5e_xsk_disable_locked(priv, ix);
215         mutex_unlock(&priv->state_lock);
216
217         return err;
218 }
219
220 int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
221 {
222         struct mlx5e_priv *priv = netdev_priv(dev);
223         struct mlx5e_params *params = &priv->channels.params;
224
225         if (unlikely(qid >= params->num_channels))
226                 return -EINVAL;
227
228         return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
229                       mlx5e_xsk_disable_pool(priv, qid);
230 }