drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */

#include <net/xdp_sock_drv.h>
#include "pool.h"
#include "setup.h"
#include "en/params.h"

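/* Map / unmap every chunk of an XSK buffer pool for DMA on the device that
 * backs this netdev; the generic xsk_pool_dma_map()/xsk_pool_dma_unmap()
 * helpers do the per-page work.
 */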
static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
			      struct xsk_buff_pool *pool)
{
	struct device *dev = mlx5_core_dma_dev(priv->mdev);

	return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}

static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
				 struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
}

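/* The xsk->pools array is allocated lazily on first use and reference
 * counted: one reference per installed XSK pool, freed again when the last
 * pool is removed.
 */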
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
	if (!xsk->pools) {
		xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
				     sizeof(*xsk->pools), GFP_KERNEL);
		if (unlikely(!xsk->pools))
			return -ENOMEM;
	}

	xsk->refcnt++;
	xsk->ever_used = true;

	return 0;
}

static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
	if (!--xsk->refcnt) {
		kfree(xsk->pools);
		xsk->pools = NULL;
	}
}

static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
{
	int err;

	err = mlx5e_xsk_get_pools(xsk);
	if (unlikely(err))
		return err;

	xsk->pools[ix] = pool;
	return 0;
}

static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
{
	xsk->pools[ix] = NULL;

	mlx5e_xsk_put_pools(xsk);
}

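/* struct mlx5e_xsk_param carries headroom and chunk_size in 16-bit fields,
 * so reject any pool whose values would not fit.
 */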
static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_headroom(pool) <= 0xffff &&
		xsk_pool_get_chunk_size(pool) <= 0xffff;
}

void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{
	xsk->headroom = xsk_pool_get_headroom(pool);
	xsk->chunk_size = xsk_pool_get_chunk_size(pool);
	xsk->unaligned = pool->unaligned;
}

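/* Install an XSK pool on channel @ix: DMA-map it, register it in priv->xsk,
 * and, if the channels are open with an XDP program attached, open and
 * activate the XSK queues and point the channel's RX resources at the XSK
 * RQ. Called with priv->state_lock held.
 */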
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
				   struct xsk_buff_pool *pool, u16 ix)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	int err;

	if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
		return -EBUSY;

	if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
		return -EINVAL;

	err = mlx5e_xsk_map_pool(priv, pool);
	if (unlikely(err))
		return err;

	err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
	if (unlikely(err))
		goto err_unmap_pool;

	mlx5e_build_xsk_param(pool, &xsk);

	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
		const char *recommendation = is_power_of_2(xsk.chunk_size) ?
			"Upgrade firmware" : "Disable striding RQ";

		mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
			       xsk.chunk_size, recommendation);
	}

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* XSK objects will be created on open. */
		goto validate_closed;
	}

	if (!params->xdp_prog) {
		/* XSK objects will be created when an XDP program is set,
		 * and the channels are reopened.
		 */
		goto validate_closed;
	}

	c = priv->channels.c[ix];

	err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
	if (unlikely(err))
		goto err_remove_pool;

	mlx5e_activate_xsk(c);
	mlx5e_trigger_napi_icosq(c);

	/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
	 * any Fill Ring entries at the setup stage.
	 */

	mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);

	mlx5e_deactivate_rq(&c->rq);
	mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);

	return 0;

err_remove_pool:
	mlx5e_xsk_remove_pool(&priv->xsk, ix);

err_unmap_pool:
	mlx5e_xsk_unmap_pool(priv, pool);

	return err;

validate_closed:
	/* Check the configuration in advance, rather than fail at a later stage
	 * (in mlx5e_xdp_set or on open) and end up with no channels.
	 */
	if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
		err = -EINVAL;
		goto err_remove_pool;
	}

	return 0;
}

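/* Undo mlx5e_xsk_enable_locked() for channel @ix: restore the regular RQ,
 * wait until it has posted enough WQEs, point the RX resources back at it,
 * then close the XSK queues and drop the pool. Called with priv->state_lock
 * held.
 */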
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
	struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
							&priv->xsk, ix);
	struct mlx5e_channel *c;

	if (unlikely(!pool))
		return -EINVAL;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto remove_pool;

	/* XSK RQ and SQ are only created if XDP program is set. */
	if (!priv->channels.params.xdp_prog)
		goto remove_pool;

	c = priv->channels.c[ix];

	mlx5e_activate_rq(&c->rq);
	mlx5e_trigger_napi_icosq(c);
	mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);

	mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);

	mlx5e_deactivate_xsk(c);
	mlx5e_close_xsk(c);

remove_pool:
	mlx5e_xsk_remove_pool(&priv->xsk, ix);
	mlx5e_xsk_unmap_pool(priv, pool);

	return 0;
}

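/* Lock-taking wrappers around the _locked variants above. */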
static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
				 u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_enable_locked(priv, pool, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_disable_locked(priv, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

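/* Entry point for the XDP_SETUP_XSK_POOL command of ndo_bpf: a non-NULL pool
 * enables XSK (AF_XDP zero-copy) on queue @qid, a NULL pool disables it.
 */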
int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;

	if (unlikely(qid >= params->num_channels))
		return -EINVAL;

	return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
		      mlx5e_xsk_disable_pool(priv, qid);
}