xsk: Propagate napi_id to XDP socket Rx path
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_main.c
CommitLineData
f62b8bb8 1/*
b3f63c3d 2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
f62b8bb8
AV
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
e8f887ac
AV
33#include <net/tc_act/tc_gact.h>
34#include <net/pkt_cls.h>
86d722ad 35#include <linux/mlx5/fs.h>
b3f63c3d 36#include <net/vxlan.h>
e3cfc7e6 37#include <net/geneve.h>
86994156 38#include <linux/bpf.h>
4b89251d 39#include <linux/if_bridge.h>
60bbf7ee 40#include <net/page_pool.h>
39d6443c 41#include <net/xdp_sock_drv.h>
1d447a39 42#include "eswitch.h"
f62b8bb8 43#include "en.h"
542578c6 44#include "en/txrx.h"
e8f887ac 45#include "en_tc.h"
1d447a39 46#include "en_rep.h"
547eede0 47#include "en_accel/ipsec.h"
e3cfc7e6 48#include "en_accel/en_accel.h"
c83294b9 49#include "en_accel/tls.h"
899a59d3 50#include "accel/ipsec.h"
c83294b9 51#include "accel/tls.h"
358aa5ce 52#include "lib/vxlan.h"
6dbc80ca 53#include "lib/clock.h"
2c81bfd5 54#include "en/port.h"
159d2131 55#include "en/xdp.h"
f2f3df55 56#include "lib/eq.h"
5c7e8bbb 57#include "en/monitor_stats.h"
4edc17fd 58#include "en/health.h"
9a22d5d8 59#include "en/params.h"
1742b3d5 60#include "en/xsk/pool.h"
db05815b
MM
61#include "en/xsk/setup.h"
62#include "en/xsk/rx.h"
63#include "en/xsk/tx.h"
cef35af3 64#include "en/hv_vhca_stats.h"
c6acd629 65#include "en/devlink.h"
71c6eaeb 66#include "lib/mlx5.h"
f62b8bb8 67
2ccb0a79 68bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
2fc4bfb7 69{
ea3886ca 70 bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
2fc4bfb7
SM
71 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
72 MLX5_CAP_ETH(mdev, reg_umr_sq);
ea3886ca
TT
73 u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
74 bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
75
76 if (!striding_rq_umr)
77 return false;
78 if (!inline_umr) {
79 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
80 (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
81 return false;
82 }
83 return true;
2fc4bfb7
SM
84}
85
696a97cf 86void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
2a0f561b 87 struct mlx5e_params *params)
2fc4bfb7 88{
73281b78
TT
89 params->log_rq_mtu_frames = is_kdump_kernel() ?
90 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
91 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
2fc4bfb7 92
6a9764ef
SM
93 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
94 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
619a8f2a 95 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
db05815b 96 BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
73281b78 97 BIT(params->log_rq_mtu_frames),
db05815b 98 BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
6a9764ef 99 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
2fc4bfb7
SM
100}
101
2ccb0a79
TT
102bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
103 struct mlx5e_params *params)
104{
db05815b
MM
105 if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
106 return false;
107
108 if (MLX5_IPSEC_DEV(mdev))
109 return false;
110
111 if (params->xdp_prog) {
112 /* XSK params are not considered here. If striding RQ is in use,
113 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
114 * be called with the known XSK params.
115 */
116 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
117 return false;
118 }
119
120 return true;
2ccb0a79 121}
291f445e 122
2ccb0a79 123void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
2fc4bfb7 124{
2ccb0a79
TT
125 params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
126 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
291f445e 127 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
99cbfa93 128 MLX5_WQ_TYPE_CYCLIC;
2fc4bfb7
SM
129}
130
b36cdb42 131void mlx5e_update_carrier(struct mlx5e_priv *priv)
f62b8bb8
AV
132{
133 struct mlx5_core_dev *mdev = priv->mdev;
134 u8 port_state;
135
136 port_state = mlx5_query_vport_state(mdev,
cc9c82a8 137 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
e53eef63 138 0);
f62b8bb8 139
87424ad5
SD
140 if (port_state == VPORT_STATE_UP) {
141 netdev_info(priv->netdev, "Link up\n");
f62b8bb8 142 netif_carrier_on(priv->netdev);
87424ad5
SD
143 } else {
144 netdev_info(priv->netdev, "Link down\n");
f62b8bb8 145 netif_carrier_off(priv->netdev);
87424ad5 146 }
f62b8bb8
AV
147}
148
149static void mlx5e_update_carrier_work(struct work_struct *work)
150{
151 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
152 update_carrier_work);
153
154 mutex_lock(&priv->state_lock);
155 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
7ca42c80
ES
156 if (priv->profile->update_carrier)
157 priv->profile->update_carrier(priv);
f62b8bb8
AV
158 mutex_unlock(&priv->state_lock);
159}
160
303211b4 161static void mlx5e_update_stats_work(struct work_struct *work)
f62b8bb8 162{
cdeef2b1 163 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
f62b8bb8 164 update_stats_work);
ed56c519 165
f62b8bb8 166 mutex_lock(&priv->state_lock);
ed56c519 167 priv->profile->update_stats(priv);
f62b8bb8
AV
168 mutex_unlock(&priv->state_lock);
169}
170
cdeef2b1
SM
171void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
172{
173 if (!priv->profile->update_stats)
174 return;
175
176 if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
177 return;
178
179 queue_work(priv->wq, &priv->update_stats_work);
180}
181
7cffaddd 182static int async_event(struct notifier_block *nb, unsigned long event, void *data)
f62b8bb8 183{
7cffaddd
SM
184 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
185 struct mlx5_eqe *eqe = data;
daa21560 186
7cffaddd
SM
187 if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
188 return NOTIFY_DONE;
daa21560 189
7cffaddd
SM
190 switch (eqe->sub_type) {
191 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
192 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
7bb29755 193 queue_work(priv->wq, &priv->update_carrier_work);
f62b8bb8 194 break;
f62b8bb8 195 default:
7cffaddd 196 return NOTIFY_DONE;
f62b8bb8 197 }
7cffaddd
SM
198
199 return NOTIFY_OK;
f62b8bb8
AV
200}
201
f62b8bb8
AV
202static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
203{
7cffaddd
SM
204 priv->events_nb.notifier_call = async_event;
205 mlx5_notifier_register(priv->mdev, &priv->events_nb);
f62b8bb8
AV
206}
207
208static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
209{
7cffaddd 210 mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
f62b8bb8
AV
211}
212
31391048
SM
213static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
214 struct mlx5e_icosq *sq,
b8a98a4c 215 struct mlx5e_umr_wqe *wqe)
7e426671
TT
216{
217 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
218 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
ea3886ca 219 u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
7e426671
TT
220
221 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
222 ds_cnt);
e658664c 223 cseg->umr_mkey = rq->mkey_be;
7e426671 224
ea3886ca 225 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
31616255 226 ucseg->xlt_octowords =
7e426671 227 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
7e426671 228 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
7e426671
TT
229}
230
231static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
232 struct mlx5e_channel *c)
233{
422d4c40 234 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
7e426671 235
eec4edc9
KC
236 rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
237 sizeof(*rq->mpwqe.info)),
ca11b798 238 GFP_KERNEL, cpu_to_node(c->cpu));
21c59685 239 if (!rq->mpwqe.info)
ea3886ca 240 return -ENOMEM;
7e426671 241
b8a98a4c 242 mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
7e426671
TT
243
244 return 0;
7e426671
TT
245}
246
a43b25da 247static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
ec8b9981 248 u64 npages, u8 page_shift,
c3c94023
AL
249 struct mlx5_core_mkey *umr_mkey,
250 dma_addr_t filler_addr)
3608ae77 251{
c3c94023
AL
252 struct mlx5_mtt *mtt;
253 int inlen;
3608ae77
TT
254 void *mkc;
255 u32 *in;
256 int err;
c3c94023
AL
257 int i;
258
259 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
3608ae77 260
1b9a07ee 261 in = kvzalloc(inlen, GFP_KERNEL);
3608ae77
TT
262 if (!in)
263 return -ENOMEM;
264
265 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
266
3608ae77
TT
267 MLX5_SET(mkc, mkc, free, 1);
268 MLX5_SET(mkc, mkc, umr_en, 1);
269 MLX5_SET(mkc, mkc, lw, 1);
270 MLX5_SET(mkc, mkc, lr, 1);
cdbd0d2b 271 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
17347d54 272 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
3608ae77
TT
273 MLX5_SET(mkc, mkc, qpn, 0xffffff);
274 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
ec8b9981 275 MLX5_SET64(mkc, mkc, len, npages << page_shift);
3608ae77
TT
276 MLX5_SET(mkc, mkc, translations_octword_size,
277 MLX5_MTT_OCTW(npages));
ec8b9981 278 MLX5_SET(mkc, mkc, log_page_size, page_shift);
c3c94023
AL
279 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
280 MLX5_MTT_OCTW(npages));
281
282 /* Initialize the mkey with all MTTs pointing to a default
283 * page (filler_addr). When the channels are activated, UMR
284 * WQEs will redirect the RX WQEs to the actual memory from
285 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
286 * to the default page.
287 */
288 mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
289 for (i = 0 ; i < npages ; i++)
290 mtt[i].ptag = cpu_to_be64(filler_addr);
3608ae77 291
ec8b9981 292 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
3608ae77
TT
293
294 kvfree(in);
295 return err;
296}
297
a43b25da 298static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
ec8b9981 299{
422d4c40 300 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
ec8b9981 301
c3c94023
AL
302 return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
303 rq->wqe_overflow.addr);
ec8b9981
TT
304}
305
b8a98a4c
TT
306static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
307{
308 return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
309}
310
069d1146
TT
311static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
312{
60d60c8f
QC
313 struct mlx5e_wqe_frag_info next_frag = {};
314 struct mlx5e_wqe_frag_info *prev = NULL;
069d1146
TT
315 int i;
316
317 next_frag.di = &rq->wqe.di[0];
069d1146
TT
318
319 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
320 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
321 struct mlx5e_wqe_frag_info *frag =
322 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
323 int f;
324
325 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
326 if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
327 next_frag.di++;
328 next_frag.offset = 0;
329 if (prev)
330 prev->last_in_page = true;
331 }
332 *frag = next_frag;
333
334 /* prepare next */
335 next_frag.offset += frag_info[f].frag_stride;
336 prev = frag;
337 }
338 }
339
340 if (prev)
341 prev->last_in_page = true;
342}
343
344static int mlx5e_init_di_list(struct mlx5e_rq *rq,
069d1146
TT
345 int wq_sz, int cpu)
346{
347 int len = wq_sz << rq->wqe.info.log_num_frags;
348
84ca176b 349 rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
069d1146
TT
350 GFP_KERNEL, cpu_to_node(cpu));
351 if (!rq->wqe.di)
352 return -ENOMEM;
353
354 mlx5e_init_frags_partition(rq);
355
356 return 0;
357}
358
359static void mlx5e_free_di_list(struct mlx5e_rq *rq)
360{
361 kvfree(rq->wqe.di);
362}
363
8276ea13
AL
364static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
365{
366 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
367
368 mlx5e_reporter_rq_cqe_err(rq);
369}
370
c3c94023
AL
371static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
372{
373 rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
374 if (!rq->wqe_overflow.page)
375 return -ENOMEM;
376
377 rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
378 PAGE_SIZE, rq->buff.map_dir);
379 if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
380 __free_page(rq->wqe_overflow.page);
381 return -ENOMEM;
382 }
383 return 0;
384}
385
386static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
387{
388 dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
389 rq->buff.map_dir);
390 __free_page(rq->wqe_overflow.page);
391}
392
3b77235b 393static int mlx5e_alloc_rq(struct mlx5e_channel *c,
6a9764ef 394 struct mlx5e_params *params,
db05815b 395 struct mlx5e_xsk_param *xsk,
1742b3d5 396 struct xsk_buff_pool *xsk_pool,
6a9764ef 397 struct mlx5e_rq_param *rqp,
3b77235b 398 struct mlx5e_rq *rq)
f62b8bb8 399{
60bbf7ee 400 struct page_pool_params pp_params = { 0 };
a43b25da 401 struct mlx5_core_dev *mdev = c->mdev;
6a9764ef 402 void *rqc = rqp->rqc;
f62b8bb8 403 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
db05815b 404 u32 rq_xdp_ix;
069d1146 405 u32 pool_size;
f62b8bb8
AV
406 int wq_sz;
407 int err;
408 int i;
409
231243c8 410 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
311c7c71 411
6a9764ef 412 rq->wq_type = params->rq_wq_type;
7e426671
TT
413 rq->pdev = c->pdev;
414 rq->netdev = c->netdev;
a43b25da 415 rq->tstamp = c->tstamp;
7c39afb3 416 rq->clock = &mdev->clock;
7e426671
TT
417 rq->channel = c;
418 rq->ix = c->ix;
a43b25da 419 rq->mdev = mdev;
0073c8f7 420 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
b9673cf5 421 rq->xdpsq = &c->rq_xdpsq;
1742b3d5 422 rq->xsk_pool = xsk_pool;
db05815b 423
1742b3d5 424 if (rq->xsk_pool)
db05815b
MM
425 rq->stats = &c->priv->channel_stats[c->ix].xskrq;
426 else
427 rq->stats = &c->priv->channel_stats[c->ix].rq;
8276ea13 428 INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
97bc402d 429
85192dbf
AN
430 if (params->xdp_prog)
431 bpf_prog_inc(params->xdp_prog);
fe45386a 432 RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
7e426671 433
db05815b
MM
434 rq_xdp_ix = rq->ix;
435 if (xsk)
436 rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
b02e5a0e 437 err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
e213f5b6 438 if (err < 0)
08a762ce 439 goto err_rq_xdp_prog;
0ddf5432 440
fe45386a 441 rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
db05815b 442 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
60bbf7ee 443 pool_size = 1 << params->log_rq_mtu_frames;
b5503b99 444
6a9764ef 445 switch (rq->wq_type) {
461017cb 446 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
422d4c40
TT
447 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
448 &rq->wq_ctrl);
449 if (err)
08a762ce 450 goto err_rq_xdp;
422d4c40 451
c3c94023 452 err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
422d4c40 453 if (err)
e692139e 454 goto err_rq_wq_destroy;
422d4c40
TT
455
456 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
457
458 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
60bbf7ee 459
db05815b
MM
460 pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
461 mlx5e_mpwqe_get_log_rq_size(params, xsk);
422d4c40 462
db05815b
MM
463 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
464 rq->mpwqe.num_strides =
465 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
1bfecfca 466
d628ee4f
JDB
467 rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
468
a43b25da 469 err = mlx5e_create_rq_umr_mkey(mdev, rq);
7e426671 470 if (err)
c3c94023 471 goto err_rq_drop_page;
ec8b9981
TT
472 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
473
474 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
475 if (err)
08a762ce 476 goto err_rq_mkey;
461017cb 477 break;
99cbfa93
TT
478 default: /* MLX5_WQ_TYPE_CYCLIC */
479 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
480 &rq->wq_ctrl);
422d4c40 481 if (err)
08a762ce 482 goto err_rq_xdp;
422d4c40
TT
483
484 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
485
99cbfa93 486 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
422d4c40 487
069d1146 488 rq->wqe.info = rqp->frags_info;
d628ee4f
JDB
489 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
490
069d1146 491 rq->wqe.frags =
84ca176b
KC
492 kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
493 (wq_sz << rq->wqe.info.log_num_frags)),
069d1146 494 GFP_KERNEL, cpu_to_node(c->cpu));
47a6ca3f
WY
495 if (!rq->wqe.frags) {
496 err = -ENOMEM;
08a762ce 497 goto err_rq_wq_destroy;
47a6ca3f 498 }
069d1146 499
83b2fd64 500 err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
069d1146 501 if (err)
08a762ce 502 goto err_rq_frags;
db05815b 503
7e426671 504 rq->mkey_be = c->mkey_be;
461017cb 505 }
f62b8bb8 506
5adf4c47
TT
507 err = mlx5e_rq_set_handlers(rq, params, xsk);
508 if (err)
08a762ce 509 goto err_free_by_rq_type;
5adf4c47 510
db05815b 511 if (xsk) {
db05815b 512 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
39d6443c 513 MEM_TYPE_XSK_BUFF_POOL, NULL);
c4655761 514 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
db05815b
MM
515 } else {
516 /* Create a page_pool and register it with rxq */
517 pp_params.order = 0;
518 pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
519 pp_params.pool_size = pool_size;
520 pp_params.nid = cpu_to_node(c->cpu);
521 pp_params.dev = c->pdev;
522 pp_params.dma_dir = rq->buff.map_dir;
523
524 /* page_pool can be used even when there is no rq->xdp_prog,
525 * given page_pool does not handle DMA mapping there is no
526 * required state to clear. And page_pool gracefully handle
527 * elevated refcnt.
528 */
529 rq->page_pool = page_pool_create(&pp_params);
530 if (IS_ERR(rq->page_pool)) {
531 err = PTR_ERR(rq->page_pool);
532 rq->page_pool = NULL;
08a762ce 533 goto err_free_by_rq_type;
db05815b
MM
534 }
535 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
536 MEM_TYPE_PAGE_POOL, rq->page_pool);
84f5e3fb 537 }
db05815b 538 if (err)
08a762ce 539 goto err_free_by_rq_type;
84f5e3fb 540
f62b8bb8 541 for (i = 0; i < wq_sz; i++) {
4c2af5cc 542 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
99cbfa93 543 struct mlx5e_rx_wqe_ll *wqe =
422d4c40 544 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
069d1146
TT
545 u32 byte_count =
546 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
b8a98a4c 547 u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
4c2af5cc 548
99cbfa93
TT
549 wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
550 wqe->data[0].byte_count = cpu_to_be32(byte_count);
551 wqe->data[0].lkey = rq->mkey_be;
422d4c40 552 } else {
99cbfa93
TT
553 struct mlx5e_rx_wqe_cyc *wqe =
554 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
069d1146
TT
555 int f;
556
557 for (f = 0; f < rq->wqe.info.num_frags; f++) {
558 u32 frag_size = rq->wqe.info.arr[f].frag_size |
559 MLX5_HW_START_PADDING;
560
561 wqe->data[f].byte_count = cpu_to_be32(frag_size);
562 wqe->data[f].lkey = rq->mkey_be;
563 }
564 /* check if num_frags is not a pow of two */
565 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
566 wqe->data[f].byte_count = 0;
567 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
568 wqe->data[f].addr = 0;
569 }
422d4c40 570 }
f62b8bb8
AV
571 }
572
9a317425
AG
573 INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
574
575 switch (params->rx_cq_moderation.cq_period_mode) {
576 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
c002bd52 577 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
9a317425
AG
578 break;
579 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
580 default:
c002bd52 581 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9a317425
AG
582 }
583
4415a031
TT
584 rq->page_cache.head = 0;
585 rq->page_cache.tail = 0;
586
f62b8bb8
AV
587 return 0;
588
08a762ce 589err_free_by_rq_type:
069d1146
TT
590 switch (rq->wq_type) {
591 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
ca11b798 592 kvfree(rq->mpwqe.info);
08a762ce 593err_rq_mkey:
069d1146 594 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
c3c94023
AL
595err_rq_drop_page:
596 mlx5e_free_mpwqe_rq_drop_page(rq);
069d1146
TT
597 break;
598 default: /* MLX5_WQ_TYPE_CYCLIC */
069d1146 599 mlx5e_free_di_list(rq);
08a762ce
AL
600err_rq_frags:
601 kvfree(rq->wqe.frags);
069d1146 602 }
f62b8bb8 603err_rq_wq_destroy:
08a762ce
AL
604 mlx5_wq_destroy(&rq->wq_ctrl);
605err_rq_xdp:
606 xdp_rxq_info_unreg(&rq->xdp_rxq);
607err_rq_xdp_prog:
fe45386a
MM
608 if (params->xdp_prog)
609 bpf_prog_put(params->xdp_prog);
f62b8bb8
AV
610
611 return err;
612}
613
3b77235b 614static void mlx5e_free_rq(struct mlx5e_rq *rq)
f62b8bb8 615{
fe45386a
MM
616 struct mlx5e_channel *c = rq->channel;
617 struct bpf_prog *old_prog = NULL;
4415a031
TT
618 int i;
619
fe45386a
MM
620 /* drop_rq has neither channel nor xdp_prog. */
621 if (c)
622 old_prog = rcu_dereference_protected(rq->xdp_prog,
623 lockdep_is_held(&c->priv->state_lock));
624 if (old_prog)
625 bpf_prog_put(old_prog);
86994156 626
461017cb
TT
627 switch (rq->wq_type) {
628 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
ca11b798 629 kvfree(rq->mpwqe.info);
a43b25da 630 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
c3c94023 631 mlx5e_free_mpwqe_rq_drop_page(rq);
461017cb 632 break;
99cbfa93 633 default: /* MLX5_WQ_TYPE_CYCLIC */
069d1146
TT
634 kvfree(rq->wqe.frags);
635 mlx5e_free_di_list(rq);
461017cb
TT
636 }
637
4415a031
TT
638 for (i = rq->page_cache.head; i != rq->page_cache.tail;
639 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
640 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
641
db05815b
MM
642 /* With AF_XDP, page_cache is not used, so this loop is not
643 * entered, and it's safe to call mlx5e_page_release_dynamic
644 * directly.
645 */
646 mlx5e_page_release_dynamic(rq, dma_info, false);
4415a031 647 }
29b006a6
JDB
648
649 xdp_rxq_info_unreg(&rq->xdp_rxq);
1da4bbef 650 page_pool_destroy(rq->page_pool);
f62b8bb8
AV
651 mlx5_wq_destroy(&rq->wq_ctrl);
652}
653
6a9764ef
SM
654static int mlx5e_create_rq(struct mlx5e_rq *rq,
655 struct mlx5e_rq_param *param)
f62b8bb8 656{
a43b25da 657 struct mlx5_core_dev *mdev = rq->mdev;
f62b8bb8
AV
658
659 void *in;
660 void *rqc;
661 void *wq;
662 int inlen;
663 int err;
664
665 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
666 sizeof(u64) * rq->wq_ctrl.buf.npages;
1b9a07ee 667 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
668 if (!in)
669 return -ENOMEM;
670
671 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
672 wq = MLX5_ADDR_OF(rqc, rqc, wq);
673
674 memcpy(rqc, param->rqc, sizeof(param->rqc));
675
97de9f31 676 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
f62b8bb8 677 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
f62b8bb8 678 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
68cdf5d6 679 MLX5_ADAPTER_PAGE_SHIFT);
f62b8bb8
AV
680 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
681
3a2f7033
TT
682 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
683 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
f62b8bb8 684
7db22ffb 685 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
f62b8bb8
AV
686
687 kvfree(in);
688
689 return err;
690}
691
be5323c8 692int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
f62b8bb8 693{
7cbaf9a3 694 struct mlx5_core_dev *mdev = rq->mdev;
f62b8bb8
AV
695
696 void *in;
697 void *rqc;
698 int inlen;
699 int err;
700
701 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 702 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
703 if (!in)
704 return -ENOMEM;
705
5ee090ed
AL
706 if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
707 mlx5e_rqwq_reset(rq);
708
f62b8bb8
AV
709 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
710
711 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
712 MLX5_SET(rqc, rqc, state, next_state);
713
e0b4b472 714 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
f62b8bb8
AV
715
716 kvfree(in);
717
718 return err;
719}
720
102722fc
GE
721static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
722{
723 struct mlx5e_channel *c = rq->channel;
724 struct mlx5e_priv *priv = c->priv;
725 struct mlx5_core_dev *mdev = priv->mdev;
726
727 void *in;
728 void *rqc;
729 int inlen;
730 int err;
731
732 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 733 in = kvzalloc(inlen, GFP_KERNEL);
102722fc
GE
734 if (!in)
735 return -ENOMEM;
736
737 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
738
739 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
740 MLX5_SET64(modify_rq_in, in, modify_bitmask,
741 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
742 MLX5_SET(rqc, rqc, scatter_fcs, enable);
743 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
744
e0b4b472 745 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
102722fc
GE
746
747 kvfree(in);
748
749 return err;
750}
751
36350114
GP
752static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
753{
754 struct mlx5e_channel *c = rq->channel;
a43b25da 755 struct mlx5_core_dev *mdev = c->mdev;
36350114
GP
756 void *in;
757 void *rqc;
758 int inlen;
759 int err;
760
761 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1b9a07ee 762 in = kvzalloc(inlen, GFP_KERNEL);
36350114
GP
763 if (!in)
764 return -ENOMEM;
765
766 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
767
768 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
83b502a1
AV
769 MLX5_SET64(modify_rq_in, in, modify_bitmask,
770 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
36350114
GP
771 MLX5_SET(rqc, rqc, vsd, vsd);
772 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
773
e0b4b472 774 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
36350114
GP
775
776 kvfree(in);
777
778 return err;
779}
780
3b77235b 781static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
f62b8bb8 782{
a43b25da 783 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
f62b8bb8
AV
784}
785
db05815b 786int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
f62b8bb8 787{
1e7477ae 788 unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
f62b8bb8 789 struct mlx5e_channel *c = rq->channel;
a43b25da 790
422d4c40 791 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
f62b8bb8 792
1e7477ae 793 do {
422d4c40 794 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
f62b8bb8
AV
795 return 0;
796
797 msleep(20);
1e7477ae
EBE
798 } while (time_before(jiffies, exp_time));
799
800 netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
422d4c40 801 c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
f62b8bb8 802
32c57fb2 803 mlx5e_reporter_rx_timeout(rq);
f62b8bb8
AV
804 return -ETIMEDOUT;
805}
806
e239c6d6
AL
807void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
808{
809 struct mlx5_wq_ll *wq;
810 u16 head;
811 int i;
812
813 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
814 return;
815
816 wq = &rq->mpwqe.wq;
817 head = wq->head;
818
819 /* Outstanding UMR WQEs (in progress) start at wq->head */
820 for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
821 rq->dealloc_wqe(rq, head);
822 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
823 }
824
825 rq->mpwqe.actual_wq_head = wq->head;
826 rq->mpwqe.umr_in_progress = 0;
827 rq->mpwqe.umr_completed = 0;
828}
829
be5323c8 830void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
f2fde18c 831{
f2fde18c
SM
832 __be16 wqe_ix_be;
833 u16 wqe_ix;
834
422d4c40
TT
835 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
836 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
837
e239c6d6 838 mlx5e_free_rx_in_progress_descs(rq);
422d4c40
TT
839
840 while (!mlx5_wq_ll_is_empty(wq)) {
99cbfa93 841 struct mlx5e_rx_wqe_ll *wqe;
422d4c40
TT
842
843 wqe_ix_be = *wq->tail_next;
844 wqe_ix = be16_to_cpu(wqe_ix_be);
845 wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
846 rq->dealloc_wqe(rq, wqe_ix);
847 mlx5_wq_ll_pop(wq, wqe_ix_be,
848 &wqe->next.next_wqe_index);
849 }
850 } else {
99cbfa93 851 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
422d4c40 852
99cbfa93
TT
853 while (!mlx5_wq_cyc_is_empty(wq)) {
854 wqe_ix = mlx5_wq_cyc_get_tail(wq);
422d4c40 855 rq->dealloc_wqe(rq, wqe_ix);
99cbfa93 856 mlx5_wq_cyc_pop(wq);
422d4c40 857 }
accd5883 858 }
069d1146 859
f2fde18c
SM
860}
861
db05815b
MM
862int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
863 struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
1742b3d5 864 struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
f62b8bb8
AV
865{
866 int err;
867
1742b3d5 868 err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
f62b8bb8
AV
869 if (err)
870 return err;
871
3b77235b 872 err = mlx5e_create_rq(rq, param);
f62b8bb8 873 if (err)
3b77235b 874 goto err_free_rq;
f62b8bb8 875
36350114 876 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
f62b8bb8 877 if (err)
3b77235b 878 goto err_destroy_rq;
f62b8bb8 879
a2907436
TT
880 if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
881 __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
882
db849faa
SM
883 if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
884 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
885
9a317425 886 if (params->rx_dim_enabled)
af5a6c93 887 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
cb3c7fd4 888
5d0bb3ba
SM
889 /* We disable csum_complete when XDP is enabled since
890 * XDP programs might manipulate packets which will render
891 * skb->checksum incorrect.
892 */
893 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
b856df28
OG
894 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
895
b7cf0806
OL
896 /* For CQE compression on striding RQ, use stride index provided by
897 * HW if capability is supported.
898 */
899 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
900 MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
901 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
902
f62b8bb8
AV
903 return 0;
904
f62b8bb8
AV
905err_destroy_rq:
906 mlx5e_destroy_rq(rq);
3b77235b
SM
907err_free_rq:
908 mlx5e_free_rq(rq);
f62b8bb8
AV
909
910 return err;
911}
912
be5323c8 913void mlx5e_activate_rq(struct mlx5e_rq *rq)
acc6c595 914{
acc6c595 915 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
63d26b49 916 mlx5e_trigger_irq(&rq->channel->icosq);
acc6c595
SM
917}
918
db05815b 919void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
f62b8bb8 920{
c0f1147d 921 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
9c25a22d 922 synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
acc6c595 923}
cb3c7fd4 924
db05815b 925void mlx5e_close_rq(struct mlx5e_rq *rq)
acc6c595 926{
9a317425 927 cancel_work_sync(&rq->dim.work);
be5323c8 928 cancel_work_sync(&rq->channel->icosq.recover_work);
8276ea13 929 cancel_work_sync(&rq->recover_work);
f62b8bb8 930 mlx5e_destroy_rq(rq);
3b77235b
SM
931 mlx5e_free_rx_descs(rq);
932 mlx5e_free_rq(rq);
f62b8bb8
AV
933}
934
31391048 935static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
b5503b99 936{
fea28dd6 937 kvfree(sq->db.xdpi_fifo.xi);
1feeab80 938 kvfree(sq->db.wqe_info);
fea28dd6
TT
939}
940
941static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
942{
943 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
944 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
945 int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
946
947 xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
948 GFP_KERNEL, numa);
949 if (!xdpi_fifo->xi)
950 return -ENOMEM;
951
952 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
953 xdpi_fifo->cc = &sq->xdpi_fifo_cc;
954 xdpi_fifo->mask = dsegs_per_wq - 1;
955
956 return 0;
b5503b99
SM
957}
958
31391048 959static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
b5503b99 960{
1feeab80 961 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
fea28dd6 962 int err;
b5503b99 963
1feeab80
TT
964 sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
965 GFP_KERNEL, numa);
966 if (!sq->db.wqe_info)
967 return -ENOMEM;
968
fea28dd6
TT
969 err = mlx5e_alloc_xdpsq_fifo(sq, numa);
970 if (err) {
31391048 971 mlx5e_free_xdpsq_db(sq);
fea28dd6 972 return err;
b5503b99
SM
973 }
974
975 return 0;
976}
977
31391048 978static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
6a9764ef 979 struct mlx5e_params *params,
1742b3d5 980 struct xsk_buff_pool *xsk_pool,
31391048 981 struct mlx5e_sq_param *param,
58b99ee3
TT
982 struct mlx5e_xdpsq *sq,
983 bool is_redirect)
31391048
SM
984{
985 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
a43b25da 986 struct mlx5_core_dev *mdev = c->mdev;
ddf385e3 987 struct mlx5_wq_cyc *wq = &sq->wq;
31391048
SM
988 int err;
989
990 sq->pdev = c->pdev;
991 sq->mkey_be = c->mkey_be;
992 sq->channel = c;
993 sq->uar_map = mdev->mlx5e_res.bfreg.map;
6a9764ef 994 sq->min_inline_mode = params->tx_min_inline_mode;
c94e4f11 995 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1742b3d5 996 sq->xsk_pool = xsk_pool;
db05815b 997
1742b3d5 998 sq->stats = sq->xsk_pool ?
db05815b
MM
999 &c->priv->channel_stats[c->ix].xsksq :
1000 is_redirect ?
1001 &c->priv->channel_stats[c->ix].xdpsq :
1002 &c->priv->channel_stats[c->ix].rq_xdpsq;
31391048 1003
231243c8 1004 param->wq.db_numa_node = cpu_to_node(c->cpu);
ddf385e3 1005 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
31391048
SM
1006 if (err)
1007 return err;
ddf385e3 1008 wq->db = &wq->db[MLX5_SND_DBR];
31391048 1009
231243c8 1010 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
31391048
SM
1011 if (err)
1012 goto err_sq_wq_destroy;
1013
1014 return 0;
1015
1016err_sq_wq_destroy:
1017 mlx5_wq_destroy(&sq->wq_ctrl);
1018
1019 return err;
1020}
1021
1022static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1023{
1024 mlx5e_free_xdpsq_db(sq);
1025 mlx5_wq_destroy(&sq->wq_ctrl);
1026}
1027
1028static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
f62b8bb8 1029{
7d42c8e9 1030 kvfree(sq->db.wqe_info);
f62b8bb8
AV
1031}
1032
31391048 1033static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
f10b7cc7 1034{
fd9b4be8 1035 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
7d42c8e9 1036 size_t size;
f10b7cc7 1037
7d42c8e9
MM
1038 size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
1039 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1040 if (!sq->db.wqe_info)
f10b7cc7
SM
1041 return -ENOMEM;
1042
1043 return 0;
1044}
1045
be5323c8
AL
1046static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
1047{
1048 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1049 recover_work);
1050
1051 mlx5e_reporter_icosq_cqe_err(sq);
1052}
1053
31391048 1054static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
31391048
SM
1055 struct mlx5e_sq_param *param,
1056 struct mlx5e_icosq *sq)
f10b7cc7 1057{
31391048 1058 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
a43b25da 1059 struct mlx5_core_dev *mdev = c->mdev;
ddf385e3 1060 struct mlx5_wq_cyc *wq = &sq->wq;
31391048 1061 int err;
f10b7cc7 1062
31391048
SM
1063 sq->channel = c;
1064 sq->uar_map = mdev->mlx5e_res.bfreg.map;
f62b8bb8 1065
231243c8 1066 param->wq.db_numa_node = cpu_to_node(c->cpu);
ddf385e3 1067 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
31391048
SM
1068 if (err)
1069 return err;
ddf385e3 1070 wq->db = &wq->db[MLX5_SND_DBR];
f62b8bb8 1071
231243c8 1072 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
31391048
SM
1073 if (err)
1074 goto err_sq_wq_destroy;
1075
be5323c8
AL
1076 INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
1077
f62b8bb8 1078 return 0;
31391048
SM
1079
1080err_sq_wq_destroy:
1081 mlx5_wq_destroy(&sq->wq_ctrl);
1082
1083 return err;
f62b8bb8
AV
1084}
1085
31391048 1086static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
f10b7cc7 1087{
31391048
SM
1088 mlx5e_free_icosq_db(sq);
1089 mlx5_wq_destroy(&sq->wq_ctrl);
f10b7cc7
SM
1090}
1091
31391048 1092static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
f10b7cc7 1093{
ca11b798 1094 kvfree(sq->db.wqe_info);
338c46c6 1095 kvfree(sq->db.skb_fifo);
ca11b798 1096 kvfree(sq->db.dma_fifo);
f10b7cc7
SM
1097}
1098
31391048 1099static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
b5503b99 1100{
31391048
SM
1101 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1102 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1103
eec4edc9
KC
1104 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1105 sizeof(*sq->db.dma_fifo)),
ca11b798 1106 GFP_KERNEL, numa);
338c46c6
MM
1107 sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
1108 sizeof(*sq->db.skb_fifo)),
1109 GFP_KERNEL, numa);
eec4edc9
KC
1110 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1111 sizeof(*sq->db.wqe_info)),
ca11b798 1112 GFP_KERNEL, numa);
338c46c6 1113 if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
31391048
SM
1114 mlx5e_free_txqsq_db(sq);
1115 return -ENOMEM;
b5503b99 1116 }
31391048
SM
1117
1118 sq->dma_fifo_mask = df_sz - 1;
338c46c6 1119 sq->skb_fifo_mask = df_sz - 1;
31391048
SM
1120
1121 return 0;
b5503b99
SM
1122}
1123
de8650a8 1124static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
31391048 1125static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
acc6c595 1126 int txq_ix,
6a9764ef 1127 struct mlx5e_params *params,
31391048 1128 struct mlx5e_sq_param *param,
05909bab
EBE
1129 struct mlx5e_txqsq *sq,
1130 int tc)
f62b8bb8 1131{
31391048 1132 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
a43b25da 1133 struct mlx5_core_dev *mdev = c->mdev;
ddf385e3 1134 struct mlx5_wq_cyc *wq = &sq->wq;
f62b8bb8
AV
1135 int err;
1136
f10b7cc7 1137 sq->pdev = c->pdev;
a43b25da 1138 sq->tstamp = c->tstamp;
7c39afb3 1139 sq->clock = &mdev->clock;
f10b7cc7
SM
1140 sq->mkey_be = c->mkey_be;
1141 sq->channel = c;
57c70d87 1142 sq->ch_ix = c->ix;
acc6c595 1143 sq->txq_ix = txq_ix;
aff26157 1144 sq->uar_map = mdev->mlx5e_res.bfreg.map;
6a9764ef 1145 sq->min_inline_mode = params->tx_min_inline_mode;
84d1bb2b 1146 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
05909bab 1147 sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
de8650a8 1148 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
b431302e
TT
1149 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
1150 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
2ac9cfe7
IT
1151 if (MLX5_IPSEC_DEV(c->priv->mdev))
1152 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
5ffb4d85 1153 if (mlx5_accel_is_tls_device(c->priv->mdev))
bf239741 1154 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
5af75c74
MM
1155 if (param->is_mpw)
1156 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
579524c6 1157 sq->stop_room = param->stop_room;
f10b7cc7 1158
231243c8 1159 param->wq.db_numa_node = cpu_to_node(c->cpu);
ddf385e3 1160 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
f62b8bb8 1161 if (err)
aff26157 1162 return err;
ddf385e3 1163 wq->db = &wq->db[MLX5_SND_DBR];
f62b8bb8 1164
231243c8 1165 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
7ec0bb22 1166 if (err)
f62b8bb8
AV
1167 goto err_sq_wq_destroy;
1168
cbce4f44
TG
1169 INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1170 sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1171
f62b8bb8
AV
1172 return 0;
1173
1174err_sq_wq_destroy:
1175 mlx5_wq_destroy(&sq->wq_ctrl);
1176
f62b8bb8
AV
1177 return err;
1178}
1179
31391048 1180static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
f62b8bb8 1181{
31391048 1182 mlx5e_free_txqsq_db(sq);
f62b8bb8 1183 mlx5_wq_destroy(&sq->wq_ctrl);
f62b8bb8
AV
1184}
1185
33ad9711
SM
1186struct mlx5e_create_sq_param {
1187 struct mlx5_wq_ctrl *wq_ctrl;
1188 u32 cqn;
1189 u32 tisn;
1190 u8 tis_lst_sz;
1191 u8 min_inline_mode;
1192};
1193
a43b25da 1194static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
33ad9711
SM
1195 struct mlx5e_sq_param *param,
1196 struct mlx5e_create_sq_param *csp,
1197 u32 *sqn)
f62b8bb8 1198{
f62b8bb8
AV
1199 void *in;
1200 void *sqc;
1201 void *wq;
1202 int inlen;
1203 int err;
1204
1205 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
33ad9711 1206 sizeof(u64) * csp->wq_ctrl->buf.npages;
1b9a07ee 1207 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
1208 if (!in)
1209 return -ENOMEM;
1210
1211 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1212 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1213
1214 memcpy(sqc, param->sqc, sizeof(param->sqc));
33ad9711
SM
1215 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1216 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1217 MLX5_SET(sqc, sqc, cqn, csp->cqn);
a6f402e4
SM
1218
1219 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
33ad9711 1220 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
a6f402e4 1221
33ad9711 1222 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
db75373c 1223 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
f62b8bb8
AV
1224
1225 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
a43b25da 1226 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
33ad9711 1227 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
68cdf5d6 1228 MLX5_ADAPTER_PAGE_SHIFT);
33ad9711 1229 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
f62b8bb8 1230
3a2f7033
TT
1231 mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1232 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
f62b8bb8 1233
33ad9711 1234 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
f62b8bb8
AV
1235
1236 kvfree(in);
1237
1238 return err;
1239}
1240
de8650a8
EBE
1241int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1242 struct mlx5e_modify_sq_param *p)
f62b8bb8 1243{
f62b8bb8
AV
1244 void *in;
1245 void *sqc;
1246 int inlen;
1247 int err;
1248
1249 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1b9a07ee 1250 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
1251 if (!in)
1252 return -ENOMEM;
1253
1254 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1255
33ad9711
SM
1256 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1257 MLX5_SET(sqc, sqc, state, p->next_state);
1258 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
507f0c81 1259 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
33ad9711 1260 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
507f0c81 1261 }
f62b8bb8 1262
e0b4b472 1263 err = mlx5_core_modify_sq(mdev, sqn, in);
f62b8bb8
AV
1264
1265 kvfree(in);
1266
1267 return err;
1268}
1269
a43b25da 1270static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
33ad9711 1271{
a43b25da 1272 mlx5_core_destroy_sq(mdev, sqn);
f62b8bb8
AV
1273}
1274
a43b25da 1275static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
31391048
SM
1276 struct mlx5e_sq_param *param,
1277 struct mlx5e_create_sq_param *csp,
1278 u32 *sqn)
f62b8bb8 1279{
33ad9711 1280 struct mlx5e_modify_sq_param msp = {0};
31391048
SM
1281 int err;
1282
a43b25da 1283 err = mlx5e_create_sq(mdev, param, csp, sqn);
31391048
SM
1284 if (err)
1285 return err;
1286
1287 msp.curr_state = MLX5_SQC_STATE_RST;
1288 msp.next_state = MLX5_SQC_STATE_RDY;
a43b25da 1289 err = mlx5e_modify_sq(mdev, *sqn, &msp);
31391048 1290 if (err)
a43b25da 1291 mlx5e_destroy_sq(mdev, *sqn);
31391048
SM
1292
1293 return err;
1294}
1295
7f859ecf
SM
1296static int mlx5e_set_sq_maxrate(struct net_device *dev,
1297 struct mlx5e_txqsq *sq, u32 rate);
1298
31391048 1299static int mlx5e_open_txqsq(struct mlx5e_channel *c,
a43b25da 1300 u32 tisn,
acc6c595 1301 int txq_ix,
6a9764ef 1302 struct mlx5e_params *params,
31391048 1303 struct mlx5e_sq_param *param,
05909bab
EBE
1304 struct mlx5e_txqsq *sq,
1305 int tc)
31391048
SM
1306{
1307 struct mlx5e_create_sq_param csp = {};
7f859ecf 1308 u32 tx_rate;
f62b8bb8
AV
1309 int err;
1310
05909bab 1311 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
f62b8bb8
AV
1312 if (err)
1313 return err;
1314
a43b25da 1315 csp.tisn = tisn;
31391048 1316 csp.tis_lst_sz = 1;
33ad9711
SM
1317 csp.cqn = sq->cq.mcq.cqn;
1318 csp.wq_ctrl = &sq->wq_ctrl;
1319 csp.min_inline_mode = sq->min_inline_mode;
a43b25da 1320 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
f62b8bb8 1321 if (err)
31391048 1322 goto err_free_txqsq;
f62b8bb8 1323
a43b25da 1324 tx_rate = c->priv->tx_rates[sq->txq_ix];
7f859ecf 1325 if (tx_rate)
a43b25da 1326 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
7f859ecf 1327
cbce4f44
TG
1328 if (params->tx_dim_enabled)
1329 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1330
f62b8bb8
AV
1331 return 0;
1332
31391048 1333err_free_txqsq:
31391048 1334 mlx5e_free_txqsq(sq);
f62b8bb8
AV
1335
1336 return err;
1337}
1338
de8650a8 1339void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
acc6c595 1340{
a43b25da 1341 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
acc6c595
SM
1342 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1343 netdev_tx_reset_queue(sq->txq);
1344 netif_tx_start_queue(sq->txq);
1345}
1346
de8650a8 1347void mlx5e_tx_disable_queue(struct netdev_queue *txq)
f62b8bb8
AV
1348{
1349 __netif_tx_lock_bh(txq);
1350 netif_tx_stop_queue(txq);
1351 __netif_tx_unlock_bh(txq);
1352}
1353
acc6c595 1354static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
f62b8bb8 1355{
ddf385e3 1356 struct mlx5_wq_cyc *wq = &sq->wq;
33ad9711 1357
c0f1147d 1358 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
9c25a22d 1359 synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
29429f33 1360
de8650a8 1361 mlx5e_tx_disable_queue(sq->txq);
f62b8bb8 1362
31391048 1363 /* last doorbell out, godspeed .. */
ddf385e3
TT
1364 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1365 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
31391048 1366 struct mlx5e_tx_wqe *nop;
864b2d71 1367
41a8e4eb
TT
1368 sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
1369 .num_wqebbs = 1,
1370 };
0c258dec 1371
ddf385e3
TT
1372 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1373 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
29429f33 1374 }
acc6c595
SM
1375}
1376
1377static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1378{
1379 struct mlx5e_channel *c = sq->channel;
a43b25da 1380 struct mlx5_core_dev *mdev = c->mdev;
05d3ac97 1381 struct mlx5_rate_limit rl = {0};
f62b8bb8 1382
fa2bf86b 1383 cancel_work_sync(&sq->dim.work);
de8650a8 1384 cancel_work_sync(&sq->recover_work);
a43b25da 1385 mlx5e_destroy_sq(mdev, sq->sqn);
05d3ac97
BW
1386 if (sq->rate_limit) {
1387 rl.rate = sq->rate_limit;
1388 mlx5_rl_remove_rate(mdev, &rl);
1389 }
31391048
SM
1390 mlx5e_free_txqsq_descs(sq);
1391 mlx5e_free_txqsq(sq);
1392}
1393
de8650a8 1394static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
db75373c 1395{
de8650a8
EBE
1396 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
1397 recover_work);
30e5c2c6 1398
06293ae4 1399 mlx5e_reporter_tx_err_cqe(sq);
db75373c
EBE
1400}
1401
db05815b
MM
1402int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
1403 struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
31391048
SM
1404{
1405 struct mlx5e_create_sq_param csp = {};
1406 int err;
1407
6a9764ef 1408 err = mlx5e_alloc_icosq(c, param, sq);
31391048
SM
1409 if (err)
1410 return err;
1411
1412 csp.cqn = sq->cq.mcq.cqn;
1413 csp.wq_ctrl = &sq->wq_ctrl;
6a9764ef 1414 csp.min_inline_mode = params->tx_min_inline_mode;
a43b25da 1415 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
31391048
SM
1416 if (err)
1417 goto err_free_icosq;
1418
1419 return 0;
1420
1421err_free_icosq:
31391048
SM
1422 mlx5e_free_icosq(sq);
1423
1424 return err;
1425}
1426
be5323c8 1427void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
31391048 1428{
9d18b514
AL
1429 set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
1430}
31391048 1431
be5323c8 1432void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
9d18b514 1433{
9d18b514 1434 clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
9c25a22d 1435 synchronize_rcu(); /* Sync with NAPI. */
9d18b514
AL
1436}
1437
1438void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1439{
1440 struct mlx5e_channel *c = sq->channel;
31391048 1441
a43b25da 1442 mlx5e_destroy_sq(c->mdev, sq->sqn);
1182f365 1443 mlx5e_free_icosq_descs(sq);
31391048
SM
1444 mlx5e_free_icosq(sq);
1445}
1446
db05815b 1447int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
1742b3d5 1448 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
db05815b 1449 struct mlx5e_xdpsq *sq, bool is_redirect)
31391048 1450{
31391048 1451 struct mlx5e_create_sq_param csp = {};
31391048 1452 int err;
31391048 1453
1742b3d5 1454 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
31391048
SM
1455 if (err)
1456 return err;
1457
1458 csp.tis_lst_sz = 1;
45f171b1 1459 csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
31391048
SM
1460 csp.cqn = sq->cq.mcq.cqn;
1461 csp.wq_ctrl = &sq->wq_ctrl;
1462 csp.min_inline_mode = sq->min_inline_mode;
1463 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
a43b25da 1464 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
31391048
SM
1465 if (err)
1466 goto err_free_xdpsq;
1467
5e0d2eef
TT
1468 mlx5e_set_xmit_fp(sq, param->is_mpw);
1469
1470 if (!param->is_mpw) {
1471 unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1472 unsigned int inline_hdr_sz = 0;
1473 int i;
31391048 1474
5e0d2eef
TT
1475 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1476 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1477 ds_cnt++;
1478 }
1479
1480 /* Pre initialize fixed WQE fields */
1481 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
5e0d2eef
TT
1482 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1483 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1484 struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
1485 struct mlx5_wqe_data_seg *dseg;
31391048 1486
41a8e4eb
TT
1487 sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
1488 .num_wqebbs = 1,
1489 .num_pkts = 1,
1490 };
1491
5e0d2eef
TT
1492 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1493 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
31391048 1494
5e0d2eef
TT
1495 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1496 dseg->lkey = sq->mkey_be;
5e0d2eef 1497 }
31391048
SM
1498 }
1499
1500 return 0;
1501
1502err_free_xdpsq:
1503 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1504 mlx5e_free_xdpsq(sq);
1505
1506 return err;
1507}
1508
db05815b 1509void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
31391048
SM
1510{
1511 struct mlx5e_channel *c = sq->channel;
1512
1513 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
9c25a22d 1514 synchronize_rcu(); /* Sync with NAPI. */
31391048 1515
a43b25da 1516 mlx5e_destroy_sq(c->mdev, sq->sqn);
b9673cf5 1517 mlx5e_free_xdpsq_descs(sq);
31391048 1518 mlx5e_free_xdpsq(sq);
f62b8bb8
AV
1519}
1520
95b6c6a5
EBE
1521static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1522 struct mlx5e_cq_param *param,
1523 struct mlx5e_cq *cq)
f62b8bb8 1524{
f62b8bb8
AV
1525 struct mlx5_core_cq *mcq = &cq->mcq;
1526 int eqn_not_used;
0b6e26ce 1527 unsigned int irqn;
f62b8bb8
AV
1528 int err;
1529 u32 i;
1530
a1f240f1
YA
1531 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1532 if (err)
1533 return err;
1534
f62b8bb8
AV
1535 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1536 &cq->wq_ctrl);
1537 if (err)
1538 return err;
1539
f62b8bb8
AV
1540 mcq->cqe_sz = 64;
1541 mcq->set_ci_db = cq->wq_ctrl.db.db;
1542 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1543 *mcq->set_ci_db = 0;
1544 *mcq->arm_db = 0;
1545 mcq->vector = param->eq_ix;
1546 mcq->comp = mlx5e_completion_event;
1547 mcq->event = mlx5e_cq_error_event;
1548 mcq->irqn = irqn;
f62b8bb8
AV
1549
1550 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1551 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1552
1553 cqe->op_own = 0xf1;
1554 }
1555
a43b25da 1556 cq->mdev = mdev;
f62b8bb8
AV
1557
1558 return 0;
1559}
1560
95b6c6a5
EBE
1561static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1562 struct mlx5e_cq_param *param,
1563 struct mlx5e_cq *cq)
1564{
1565 struct mlx5_core_dev *mdev = c->priv->mdev;
1566 int err;
1567
231243c8
SM
1568 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1569 param->wq.db_numa_node = cpu_to_node(c->cpu);
95b6c6a5
EBE
1570 param->eq_ix = c->ix;
1571
1572 err = mlx5e_alloc_cq_common(mdev, param, cq);
1573
1574 cq->napi = &c->napi;
1575 cq->channel = c;
1576
1577 return err;
1578}
1579
3b77235b 1580static void mlx5e_free_cq(struct mlx5e_cq *cq)
f62b8bb8 1581{
3a2f7033 1582 mlx5_wq_destroy(&cq->wq_ctrl);
f62b8bb8
AV
1583}
1584
3b77235b 1585static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
f62b8bb8 1586{
38164b77 1587 u32 out[MLX5_ST_SZ_DW(create_cq_out)];
a43b25da 1588 struct mlx5_core_dev *mdev = cq->mdev;
f62b8bb8
AV
1589 struct mlx5_core_cq *mcq = &cq->mcq;
1590
1591 void *in;
1592 void *cqc;
1593 int inlen;
0b6e26ce 1594 unsigned int irqn_not_used;
f62b8bb8
AV
1595 int eqn;
1596 int err;
1597
a1f240f1
YA
1598 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1599 if (err)
1600 return err;
1601
f62b8bb8 1602 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
3a2f7033 1603 sizeof(u64) * cq->wq_ctrl.buf.npages;
1b9a07ee 1604 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
1605 if (!in)
1606 return -ENOMEM;
1607
1608 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1609
1610 memcpy(cqc, param->cqc, sizeof(param->cqc));
1611
3a2f7033 1612 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1c1b5228 1613 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
f62b8bb8 1614
9908aa29 1615 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
f62b8bb8 1616 MLX5_SET(cqc, cqc, c_eqn, eqn);
30aa60b3 1617 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
3a2f7033 1618 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
68cdf5d6 1619 MLX5_ADAPTER_PAGE_SHIFT);
f62b8bb8
AV
1620 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1621
38164b77 1622 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
f62b8bb8
AV
1623
1624 kvfree(in);
1625
1626 if (err)
1627 return err;
1628
1629 mlx5e_cq_arm(cq);
1630
1631 return 0;
1632}
1633
3b77235b 1634static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
f62b8bb8 1635{
a43b25da 1636 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
f62b8bb8
AV
1637}
1638
c4cde580 1639int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
db05815b 1640 struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
f62b8bb8 1641{
a43b25da 1642 struct mlx5_core_dev *mdev = c->mdev;
f62b8bb8 1643 int err;
f62b8bb8 1644
3b77235b 1645 err = mlx5e_alloc_cq(c, param, cq);
f62b8bb8
AV
1646 if (err)
1647 return err;
1648
3b77235b 1649 err = mlx5e_create_cq(cq, param);
f62b8bb8 1650 if (err)
3b77235b 1651 goto err_free_cq;
f62b8bb8 1652
7524a5d8 1653 if (MLX5_CAP_GEN(mdev, cq_moderation))
6a9764ef 1654 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
f62b8bb8
AV
1655 return 0;
1656
3b77235b
SM
1657err_free_cq:
1658 mlx5e_free_cq(cq);
f62b8bb8
AV
1659
1660 return err;
1661}
1662
db05815b 1663void mlx5e_close_cq(struct mlx5e_cq *cq)
f62b8bb8 1664{
f62b8bb8 1665 mlx5e_destroy_cq(cq);
3b77235b 1666 mlx5e_free_cq(cq);
f62b8bb8
AV
1667}
1668
f62b8bb8 1669static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
6a9764ef 1670 struct mlx5e_params *params,
f62b8bb8
AV
1671 struct mlx5e_channel_param *cparam)
1672{
f62b8bb8
AV
1673 int err;
1674 int tc;
1675
1676 for (tc = 0; tc < c->num_tc; tc++) {
6a9764ef 1677 err = mlx5e_open_cq(c, params->tx_cq_moderation,
c293ac92 1678 &cparam->txq_sq.cqp, &c->sq[tc].cq);
f62b8bb8
AV
1679 if (err)
1680 goto err_close_tx_cqs;
f62b8bb8
AV
1681 }
1682
1683 return 0;
1684
1685err_close_tx_cqs:
1686 for (tc--; tc >= 0; tc--)
1687 mlx5e_close_cq(&c->sq[tc].cq);
1688
1689 return err;
1690}
1691
1692static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1693{
1694 int tc;
1695
1696 for (tc = 0; tc < c->num_tc; tc++)
1697 mlx5e_close_cq(&c->sq[tc].cq);
1698}
1699
1700static int mlx5e_open_sqs(struct mlx5e_channel *c,
6a9764ef 1701 struct mlx5e_params *params,
f62b8bb8
AV
1702 struct mlx5e_channel_param *cparam)
1703{
694826e3 1704 int err, tc;
f62b8bb8 1705
6a9764ef 1706 for (tc = 0; tc < params->num_tc; tc++) {
c55d8b10 1707 int txq_ix = c->ix + tc * params->num_channels;
acc6c595 1708
45f171b1 1709 err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
c293ac92 1710 params, &cparam->txq_sq, &c->sq[tc], tc);
f62b8bb8
AV
1711 if (err)
1712 goto err_close_sqs;
1713 }
1714
1715 return 0;
1716
1717err_close_sqs:
1718 for (tc--; tc >= 0; tc--)
31391048 1719 mlx5e_close_txqsq(&c->sq[tc]);
f62b8bb8
AV
1720
1721 return err;
1722}
1723
1724static void mlx5e_close_sqs(struct mlx5e_channel *c)
1725{
1726 int tc;
1727
1728 for (tc = 0; tc < c->num_tc; tc++)
31391048 1729 mlx5e_close_txqsq(&c->sq[tc]);
f62b8bb8
AV
1730}
1731
507f0c81 1732static int mlx5e_set_sq_maxrate(struct net_device *dev,
31391048 1733 struct mlx5e_txqsq *sq, u32 rate)
507f0c81
YP
1734{
1735 struct mlx5e_priv *priv = netdev_priv(dev);
1736 struct mlx5_core_dev *mdev = priv->mdev;
33ad9711 1737 struct mlx5e_modify_sq_param msp = {0};
05d3ac97 1738 struct mlx5_rate_limit rl = {0};
507f0c81
YP
1739 u16 rl_index = 0;
1740 int err;
1741
1742 if (rate == sq->rate_limit)
1743 /* nothing to do */
1744 return 0;
1745
05d3ac97
BW
1746 if (sq->rate_limit) {
1747 rl.rate = sq->rate_limit;
507f0c81 1748 /* remove current rl index to free space to next ones */
05d3ac97
BW
1749 mlx5_rl_remove_rate(mdev, &rl);
1750 }
507f0c81
YP
1751
1752 sq->rate_limit = 0;
1753
1754 if (rate) {
05d3ac97
BW
1755 rl.rate = rate;
1756 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
507f0c81
YP
1757 if (err) {
1758 netdev_err(dev, "Failed configuring rate %u: %d\n",
1759 rate, err);
1760 return err;
1761 }
1762 }
1763
33ad9711
SM
1764 msp.curr_state = MLX5_SQC_STATE_RDY;
1765 msp.next_state = MLX5_SQC_STATE_RDY;
1766 msp.rl_index = rl_index;
1767 msp.rl_update = true;
a43b25da 1768 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
507f0c81
YP
1769 if (err) {
1770 netdev_err(dev, "Failed configuring rate %u: %d\n",
1771 rate, err);
1772 /* remove the rate from the table */
1773 if (rate)
05d3ac97 1774 mlx5_rl_remove_rate(mdev, &rl);
507f0c81
YP
1775 return err;
1776 }
1777
1778 sq->rate_limit = rate;
1779 return 0;
1780}
1781
1782static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1783{
1784 struct mlx5e_priv *priv = netdev_priv(dev);
1785 struct mlx5_core_dev *mdev = priv->mdev;
acc6c595 1786 struct mlx5e_txqsq *sq = priv->txq2sq[index];
507f0c81
YP
1787 int err = 0;
1788
1789 if (!mlx5_rl_is_supported(mdev)) {
1790 netdev_err(dev, "Rate limiting is not supported on this device\n");
1791 return -EINVAL;
1792 }
1793
1794 /* rate is given in Mb/sec, HW config is in Kb/sec */
1795 rate = rate << 10;
1796
1797 /* Check whether rate in valid range, 0 is always valid */
1798 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1799 netdev_err(dev, "TX rate %u, is not in range\n", rate);
1800 return -ERANGE;
1801 }
1802
1803 mutex_lock(&priv->state_lock);
1804 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1805 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1806 if (!err)
1807 priv->tx_rates[index] = rate;
1808 mutex_unlock(&priv->state_lock);
1809
1810 return err;
1811}
1812
0a06382f
MM
1813static int mlx5e_open_queues(struct mlx5e_channel *c,
1814 struct mlx5e_params *params,
1815 struct mlx5e_channel_param *cparam)
f62b8bb8 1816{
8960b389 1817 struct dim_cq_moder icocq_moder = {0, 0};
f62b8bb8 1818 int err;
f62b8bb8 1819
c293ac92 1820 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq);
f62b8bb8 1821 if (err)
0a06382f 1822 return err;
f62b8bb8 1823
c293ac92 1824 err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq);
8d94b590
TT
1825 if (err)
1826 goto err_close_async_icosq_cq;
1827
6a9764ef 1828 err = mlx5e_open_tx_cqs(c, params, cparam);
d3c9bc27
TT
1829 if (err)
1830 goto err_close_icosq_cq;
1831
c293ac92 1832 err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
f62b8bb8
AV
1833 if (err)
1834 goto err_close_tx_cqs;
f62b8bb8 1835
c293ac92 1836 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
58b99ee3
TT
1837 if (err)
1838 goto err_close_xdp_tx_cqs;
1839
6a9764ef 1840 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
c293ac92 1841 &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
d7a0ecab
SM
1842 if (err)
1843 goto err_close_rx_cq;
1844
f62b8bb8
AV
1845 napi_enable(&c->napi);
1846
8d94b590
TT
1847 spin_lock_init(&c->async_icosq_lock);
1848
c293ac92 1849 err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
f62b8bb8
AV
1850 if (err)
1851 goto err_disable_napi;
1852
8d94b590
TT
1853 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1854 if (err)
1855 goto err_close_async_icosq;
1856
6a9764ef 1857 err = mlx5e_open_sqs(c, params, cparam);
d3c9bc27
TT
1858 if (err)
1859 goto err_close_icosq;
1860
b9673cf5 1861 if (c->xdp) {
db05815b 1862 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
b9673cf5
MM
1863 &c->rq_xdpsq, false);
1864 if (err)
1865 goto err_close_sqs;
1866 }
b5503b99 1867
db05815b 1868 err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
f62b8bb8 1869 if (err)
b5503b99 1870 goto err_close_xdp_sq;
f62b8bb8 1871
db05815b 1872 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
58b99ee3
TT
1873 if (err)
1874 goto err_close_rq;
1875
f62b8bb8 1876 return 0;
58b99ee3
TT
1877
1878err_close_rq:
1879 mlx5e_close_rq(&c->rq);
1880
b5503b99 1881err_close_xdp_sq:
d7a0ecab 1882 if (c->xdp)
b9673cf5 1883 mlx5e_close_xdpsq(&c->rq_xdpsq);
f62b8bb8
AV
1884
1885err_close_sqs:
1886 mlx5e_close_sqs(c);
1887
d3c9bc27 1888err_close_icosq:
31391048 1889 mlx5e_close_icosq(&c->icosq);
d3c9bc27 1890
8d94b590
TT
1891err_close_async_icosq:
1892 mlx5e_close_icosq(&c->async_icosq);
1893
f62b8bb8
AV
1894err_disable_napi:
1895 napi_disable(&c->napi);
0a06382f 1896
d7a0ecab 1897 if (c->xdp)
b9673cf5 1898 mlx5e_close_cq(&c->rq_xdpsq.cq);
d7a0ecab
SM
1899
1900err_close_rx_cq:
f62b8bb8
AV
1901 mlx5e_close_cq(&c->rq.cq);
1902
58b99ee3
TT
1903err_close_xdp_tx_cqs:
1904 mlx5e_close_cq(&c->xdpsq.cq);
1905
f62b8bb8
AV
1906err_close_tx_cqs:
1907 mlx5e_close_tx_cqs(c);
1908
d3c9bc27
TT
1909err_close_icosq_cq:
1910 mlx5e_close_cq(&c->icosq.cq);
1911
8d94b590
TT
1912err_close_async_icosq_cq:
1913 mlx5e_close_cq(&c->async_icosq.cq);
1914
0a06382f
MM
1915 return err;
1916}
1917
1918static void mlx5e_close_queues(struct mlx5e_channel *c)
1919{
1920 mlx5e_close_xdpsq(&c->xdpsq);
1921 mlx5e_close_rq(&c->rq);
1922 if (c->xdp)
1923 mlx5e_close_xdpsq(&c->rq_xdpsq);
1924 mlx5e_close_sqs(c);
1925 mlx5e_close_icosq(&c->icosq);
8d94b590 1926 mlx5e_close_icosq(&c->async_icosq);
0a06382f
MM
1927 napi_disable(&c->napi);
1928 if (c->xdp)
1929 mlx5e_close_cq(&c->rq_xdpsq.cq);
1930 mlx5e_close_cq(&c->rq.cq);
1931 mlx5e_close_cq(&c->xdpsq.cq);
1932 mlx5e_close_tx_cqs(c);
1933 mlx5e_close_cq(&c->icosq.cq);
8d94b590 1934 mlx5e_close_cq(&c->async_icosq.cq);
0a06382f
MM
1935}
1936
45f171b1
MM
1937static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
1938{
1939 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
1940
1941 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
1942}
1943
0a06382f
MM
1944static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1945 struct mlx5e_params *params,
1946 struct mlx5e_channel_param *cparam,
1742b3d5 1947 struct xsk_buff_pool *xsk_pool,
0a06382f
MM
1948 struct mlx5e_channel **cp)
1949{
1950 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
1951 struct net_device *netdev = priv->netdev;
db05815b 1952 struct mlx5e_xsk_param xsk;
0a06382f
MM
1953 struct mlx5e_channel *c;
1954 unsigned int irq;
1955 int err;
1956 int eqn;
1957
1958 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1959 if (err)
1960 return err;
1961
1962 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1963 if (!c)
1964 return -ENOMEM;
1965
1966 c->priv = priv;
1967 c->mdev = priv->mdev;
1968 c->tstamp = &priv->tstamp;
1969 c->ix = ix;
1970 c->cpu = cpu;
7be3412a 1971 c->pdev = mlx5_core_dma_dev(priv->mdev);
0a06382f
MM
1972 c->netdev = priv->netdev;
1973 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1974 c->num_tc = params->num_tc;
1975 c->xdp = !!params->xdp_prog;
1976 c->stats = &priv->channel_stats[ix].ch;
1977 c->irq_desc = irq_to_desc(irq);
45f171b1 1978 c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
0a06382f 1979
0a06382f
MM
1980 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1981
1982 err = mlx5e_open_queues(c, params, cparam);
1983 if (unlikely(err))
1984 goto err_napi_del;
1985
1742b3d5
MK
1986 if (xsk_pool) {
1987 mlx5e_build_xsk_param(xsk_pool, &xsk);
1988 err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
db05815b
MM
1989 if (unlikely(err))
1990 goto err_close_queues;
1991 }
1992
0a06382f
MM
1993 *cp = c;
1994
1995 return 0;
1996
db05815b
MM
1997err_close_queues:
1998 mlx5e_close_queues(c);
1999
f62b8bb8
AV
2000err_napi_del:
2001 netif_napi_del(&c->napi);
149e566f 2002
ca11b798 2003 kvfree(c);
f62b8bb8
AV
2004
2005 return err;
2006}
2007
acc6c595
SM
2008static void mlx5e_activate_channel(struct mlx5e_channel *c)
2009{
2010 int tc;
2011
2012 for (tc = 0; tc < c->num_tc; tc++)
2013 mlx5e_activate_txqsq(&c->sq[tc]);
9d18b514 2014 mlx5e_activate_icosq(&c->icosq);
8d94b590 2015 mlx5e_activate_icosq(&c->async_icosq);
acc6c595 2016 mlx5e_activate_rq(&c->rq);
db05815b
MM
2017
2018 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2019 mlx5e_activate_xsk(c);
acc6c595
SM
2020}
2021
2022static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2023{
2024 int tc;
2025
db05815b
MM
2026 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2027 mlx5e_deactivate_xsk(c);
2028
acc6c595 2029 mlx5e_deactivate_rq(&c->rq);
8d94b590 2030 mlx5e_deactivate_icosq(&c->async_icosq);
9d18b514 2031 mlx5e_deactivate_icosq(&c->icosq);
acc6c595
SM
2032 for (tc = 0; tc < c->num_tc; tc++)
2033 mlx5e_deactivate_txqsq(&c->sq[tc]);
2034}
2035
f62b8bb8
AV
2036static void mlx5e_close_channel(struct mlx5e_channel *c)
2037{
db05815b
MM
2038 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2039 mlx5e_close_xsk(c);
0a06382f 2040 mlx5e_close_queues(c);
f62b8bb8 2041 netif_napi_del(&c->napi);
7ae92ae5 2042
ca11b798 2043 kvfree(c);
f62b8bb8
AV
2044}
2045
069d1146
TT
2046#define DEFAULT_FRAG_SIZE (2048)
2047
2048static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
2049 struct mlx5e_params *params,
db05815b 2050 struct mlx5e_xsk_param *xsk,
069d1146
TT
2051 struct mlx5e_rq_frags_info *info)
2052{
2053 u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
2054 int frag_size_max = DEFAULT_FRAG_SIZE;
2055 u32 buf_size = 0;
2056 int i;
2057
2058#ifdef CONFIG_MLX5_EN_IPSEC
2059 if (MLX5_IPSEC_DEV(mdev))
2060 byte_count += MLX5E_METADATA_ETHER_LEN;
2061#endif
2062
db05815b 2063 if (mlx5e_rx_is_linear_skb(params, xsk)) {
069d1146
TT
2064 int frag_stride;
2065
db05815b 2066 frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
069d1146
TT
2067 frag_stride = roundup_pow_of_two(frag_stride);
2068
2069 info->arr[0].frag_size = byte_count;
2070 info->arr[0].frag_stride = frag_stride;
2071 info->num_frags = 1;
2072 info->wqe_bulk = PAGE_SIZE / frag_stride;
2073 goto out;
2074 }
2075
2076 if (byte_count > PAGE_SIZE +
2077 (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2078 frag_size_max = PAGE_SIZE;
2079
2080 i = 0;
2081 while (buf_size < byte_count) {
2082 int frag_size = byte_count - buf_size;
2083
2084 if (i < MLX5E_MAX_RX_FRAGS - 1)
2085 frag_size = min(frag_size, frag_size_max);
2086
2087 info->arr[i].frag_size = frag_size;
2088 info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2089
2090 buf_size += frag_size;
2091 i++;
2092 }
2093 info->num_frags = i;
2094 /* number of different wqes sharing a page */
2095 info->wqe_bulk = 1 + (info->num_frags % 2);
2096
2097out:
2098 info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2099 info->log_num_frags = order_base_2(info->num_frags);
2100}
2101
99cbfa93
TT
2102static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2103{
2104 int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2105
2106 switch (wq_type) {
2107 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2108 sz += sizeof(struct mlx5e_rx_wqe_ll);
2109 break;
2110 default: /* MLX5_WQ_TYPE_CYCLIC */
2111 sz += sizeof(struct mlx5e_rx_wqe_cyc);
2112 }
2113
2114 return order_base_2(sz);
2115}
2116
fd9b4be8
TT
2117static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
2118{
2119 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2120
2121 return MLX5_GET(wq, wq, log_wq_sz);
2122}
2123
db05815b
MM
2124void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2125 struct mlx5e_params *params,
2126 struct mlx5e_xsk_param *xsk,
2127 struct mlx5e_rq_param *param)
f62b8bb8 2128{
f1e4fc9b 2129 struct mlx5_core_dev *mdev = priv->mdev;
f62b8bb8
AV
2130 void *rqc = param->rqc;
2131 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
99cbfa93 2132 int ndsegs = 1;
f62b8bb8 2133
6a9764ef 2134 switch (params->rq_wq_type) {
461017cb 2135 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
f1e4fc9b 2136 MLX5_SET(wq, wq, log_wqe_num_of_strides,
db05815b 2137 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
619a8f2a 2138 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
f1e4fc9b 2139 MLX5_SET(wq, wq, log_wqe_stride_size,
db05815b 2140 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
619a8f2a 2141 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
db05815b 2142 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
461017cb 2143 break;
99cbfa93 2144 default: /* MLX5_WQ_TYPE_CYCLIC */
73281b78 2145 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
db05815b 2146 mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
069d1146 2147 ndsegs = param->frags_info.num_frags;
461017cb
TT
2148 }
2149
99cbfa93 2150 MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
f62b8bb8 2151 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
99cbfa93
TT
2152 MLX5_SET(wq, wq, log_wq_stride,
2153 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
f1e4fc9b 2154 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
593cf338 2155 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
6a9764ef 2156 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
102722fc 2157 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
f62b8bb8 2158
7be3412a 2159 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
c293ac92 2160 mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
f62b8bb8
AV
2161}
2162
7cbaf9a3 2163static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2f0db879 2164 struct mlx5e_rq_param *param)
556dd1b9 2165{
7cbaf9a3 2166 struct mlx5_core_dev *mdev = priv->mdev;
556dd1b9
TT
2167 void *rqc = param->rqc;
2168 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2169
99cbfa93
TT
2170 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2171 MLX5_SET(wq, wq, log_wq_stride,
2172 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
7cbaf9a3 2173 MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2f0db879 2174
7be3412a 2175 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
556dd1b9
TT
2176}
2177
db05815b
MM
2178void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2179 struct mlx5e_sq_param *param)
f62b8bb8
AV
2180{
2181 void *sqc = param->sqc;
2182 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2183
f62b8bb8 2184 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
b50d292b 2185 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
f62b8bb8 2186
7be3412a 2187 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
d3c9bc27
TT
2188}
2189
2190static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
6a9764ef 2191 struct mlx5e_params *params,
d3c9bc27
TT
2192 struct mlx5e_sq_param *param)
2193{
2194 void *sqc = param->sqc;
2195 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
e3cfc7e6 2196 bool allow_swp;
d3c9bc27 2197
e3cfc7e6
MS
2198 allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
2199 !!MLX5_IPSEC_DEV(priv->mdev);
d3c9bc27 2200 mlx5e_build_sq_param_common(priv, param);
6a9764ef 2201 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
e3cfc7e6 2202 MLX5_SET(sqc, sqc, allow_swp, allow_swp);
5af75c74 2203 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
579524c6 2204 param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
c293ac92 2205 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
f62b8bb8
AV
2206}
2207
2208static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2209 struct mlx5e_cq_param *param)
2210{
2211 void *cqc = param->cqc;
2212
30aa60b3 2213 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
939de57d
DJ
2214 if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
2215 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
f62b8bb8
AV
2216}
2217
db05815b
MM
2218void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2219 struct mlx5e_params *params,
2220 struct mlx5e_xsk_param *xsk,
2221 struct mlx5e_cq_param *param)
f62b8bb8 2222{
73281b78 2223 struct mlx5_core_dev *mdev = priv->mdev;
b7cf0806 2224 bool hw_stridx = false;
f62b8bb8 2225 void *cqc = param->cqc;
461017cb 2226 u8 log_cq_size;
f62b8bb8 2227
6a9764ef 2228 switch (params->rq_wq_type) {
461017cb 2229 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
db05815b
MM
2230 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
2231 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
b7cf0806 2232 hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
461017cb 2233 break;
99cbfa93 2234 default: /* MLX5_WQ_TYPE_CYCLIC */
73281b78 2235 log_cq_size = params->log_rq_mtu_frames;
461017cb
TT
2236 }
2237
2238 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
6a9764ef 2239 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
b7cf0806
OL
2240 MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
2241 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
7219ab34
TT
2242 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2243 }
f62b8bb8
AV
2244
2245 mlx5e_build_common_cq_param(priv, param);
0088cbbc 2246 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
f62b8bb8
AV
2247}
2248
db05815b
MM
2249void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2250 struct mlx5e_params *params,
2251 struct mlx5e_cq_param *param)
f62b8bb8
AV
2252{
2253 void *cqc = param->cqc;
2254
6a9764ef 2255 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
f62b8bb8
AV
2256
2257 mlx5e_build_common_cq_param(priv, param);
0088cbbc 2258 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
f62b8bb8
AV
2259}
2260
db05815b
MM
2261void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2262 u8 log_wq_size,
2263 struct mlx5e_cq_param *param)
d3c9bc27
TT
2264{
2265 void *cqc = param->cqc;
2266
2267 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2268
2269 mlx5e_build_common_cq_param(priv, param);
9908aa29 2270
c002bd52 2271 param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
d3c9bc27
TT
2272}
2273
db05815b
MM
2274void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2275 u8 log_wq_size,
2276 struct mlx5e_sq_param *param)
d3c9bc27
TT
2277{
2278 void *sqc = param->sqc;
2279 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2280
2281 mlx5e_build_sq_param_common(priv, param);
2282
2283 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
bc77b240 2284 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
c293ac92 2285 mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
d3c9bc27
TT
2286}
2287
db05815b
MM
2288void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2289 struct mlx5e_params *params,
2290 struct mlx5e_sq_param *param)
b5503b99
SM
2291{
2292 void *sqc = param->sqc;
2293 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2294
2295 mlx5e_build_sq_param_common(priv, param);
6a9764ef 2296 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
6277053a 2297 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
c293ac92 2298 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
b5503b99
SM
2299}
2300
fd9b4be8
TT
2301static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
2302 struct mlx5e_rq_param *rqp)
2303{
2304 switch (params->rq_wq_type) {
2305 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2306 return order_base_2(MLX5E_UMR_WQEBBS) +
2307 mlx5e_get_rq_log_wq_sz(rqp->rqc);
2308 default: /* MLX5_WQ_TYPE_CYCLIC */
2309 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2310 }
2311}
2312
c5607360
TT
2313static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
2314{
2315 if (netdev->hw_features & NETIF_F_HW_TLS_RX)
2316 return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2317
2318 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2319}
2320
6a9764ef
SM
2321static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2322 struct mlx5e_params *params,
2323 struct mlx5e_channel_param *cparam)
f62b8bb8 2324{
c293ac92 2325 u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
d3c9bc27 2326
db05815b 2327 mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
fd9b4be8
TT
2328
2329 icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
c5607360 2330 async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
fd9b4be8 2331
c293ac92 2332 mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
6a9764ef
SM
2333 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2334 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
c293ac92 2335 mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
f62b8bb8
AV
2336}
2337
55c2503d
SM
2338int mlx5e_open_channels(struct mlx5e_priv *priv,
2339 struct mlx5e_channels *chs)
f62b8bb8 2340{
6b87663f 2341 struct mlx5e_channel_param *cparam;
03289b88 2342 int err = -ENOMEM;
f62b8bb8 2343 int i;
f62b8bb8 2344
6a9764ef 2345 chs->num = chs->params.num_channels;
03289b88 2346
ff9c852f 2347 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
ca11b798 2348 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
acc6c595
SM
2349 if (!chs->c || !cparam)
2350 goto err_free;
f62b8bb8 2351
6a9764ef 2352 mlx5e_build_channel_param(priv, &chs->params, cparam);
ff9c852f 2353 for (i = 0; i < chs->num; i++) {
1742b3d5 2354 struct xsk_buff_pool *xsk_pool = NULL;
db05815b
MM
2355
2356 if (chs->params.xdp_prog)
1742b3d5 2357 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
db05815b 2358
1742b3d5 2359 err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
f62b8bb8
AV
2360 if (err)
2361 goto err_close_channels;
2362 }
2363
11af6a6d 2364 mlx5e_health_channels_update(priv);
ca11b798 2365 kvfree(cparam);
f62b8bb8
AV
2366 return 0;
2367
2368err_close_channels:
2369 for (i--; i >= 0; i--)
ff9c852f 2370 mlx5e_close_channel(chs->c[i]);
f62b8bb8 2371
acc6c595 2372err_free:
ff9c852f 2373 kfree(chs->c);
ca11b798 2374 kvfree(cparam);
ff9c852f 2375 chs->num = 0;
f62b8bb8
AV
2376 return err;
2377}
2378
acc6c595 2379static void mlx5e_activate_channels(struct mlx5e_channels *chs)
f62b8bb8
AV
2380{
2381 int i;
2382
acc6c595
SM
2383 for (i = 0; i < chs->num; i++)
2384 mlx5e_activate_channel(chs->c[i]);
2385}
2386
f8ebecf2
MM
2387#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
2388
acc6c595
SM
2389static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2390{
2391 int err = 0;
2392 int i;
2393
f8ebecf2
MM
2394 for (i = 0; i < chs->num; i++) {
2395 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
2396
2397 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
db05815b
MM
2398
2399 /* Don't wait on the XSK RQ, because the newer xdpsock sample
2400 * doesn't provide any Fill Ring entries at the setup stage.
2401 */
f8ebecf2 2402 }
acc6c595 2403
1e7477ae 2404 return err ? -ETIMEDOUT : 0;
acc6c595
SM
2405}
2406
2407static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2408{
2409 int i;
2410
2411 for (i = 0; i < chs->num; i++)
2412 mlx5e_deactivate_channel(chs->c[i]);
2413}
2414
55c2503d 2415void mlx5e_close_channels(struct mlx5e_channels *chs)
acc6c595
SM
2416{
2417 int i;
c3b7c5c9 2418
ff9c852f
SM
2419 for (i = 0; i < chs->num; i++)
2420 mlx5e_close_channel(chs->c[i]);
f62b8bb8 2421
ff9c852f
SM
2422 kfree(chs->c);
2423 chs->num = 0;
f62b8bb8
AV
2424}
2425
a5f97fee
SM
2426static int
2427mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
f62b8bb8
AV
2428{
2429 struct mlx5_core_dev *mdev = priv->mdev;
f62b8bb8
AV
2430 void *rqtc;
2431 int inlen;
2432 int err;
1da36696 2433 u32 *in;
a5f97fee 2434 int i;
f62b8bb8 2435
f62b8bb8 2436 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1b9a07ee 2437 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
2438 if (!in)
2439 return -ENOMEM;
2440
2441 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2442
2443 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2444 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2445
a5f97fee
SM
2446 for (i = 0; i < sz; i++)
2447 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2be6967c 2448
398f3351
HHZ
2449 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2450 if (!err)
2451 rqt->enabled = true;
f62b8bb8
AV
2452
2453 kvfree(in);
1da36696
TT
2454 return err;
2455}
2456
cb67b832 2457void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
1da36696 2458{
398f3351
HHZ
2459 rqt->enabled = false;
2460 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
1da36696
TT
2461}
2462
8f493ffd 2463int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
6bfd390b
HHZ
2464{
2465 struct mlx5e_rqt *rqt = &priv->indir_rqt;
8f493ffd 2466 int err;
6bfd390b 2467
8f493ffd
SM
2468 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2469 if (err)
2470 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2471 return err;
6bfd390b
HHZ
2472}
2473
db05815b 2474int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
1da36696 2475{
1da36696
TT
2476 int err;
2477 int ix;
2478
694826e3 2479 for (ix = 0; ix < priv->max_nch; ix++) {
db05815b
MM
2480 err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
2481 if (unlikely(err))
1da36696
TT
2482 goto err_destroy_rqts;
2483 }
2484
2485 return 0;
2486
2487err_destroy_rqts:
db05815b 2488 mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
1da36696 2489 for (ix--; ix >= 0; ix--)
db05815b 2490 mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
1da36696 2491
f62b8bb8
AV
2492 return err;
2493}
2494
db05815b 2495void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
8f493ffd
SM
2496{
2497 int i;
2498
694826e3 2499 for (i = 0; i < priv->max_nch; i++)
db05815b 2500 mlx5e_destroy_rqt(priv, &tirs[i].rqt);
8f493ffd
SM
2501}
2502
a5f97fee
SM
2503static int mlx5e_rx_hash_fn(int hfunc)
2504{
2505 return (hfunc == ETH_RSS_HASH_TOP) ?
2506 MLX5_RX_HASH_FN_TOEPLITZ :
2507 MLX5_RX_HASH_FN_INVERTED_XOR8;
2508}
2509
3f6d08d1 2510int mlx5e_bits_invert(unsigned long a, int size)
a5f97fee
SM
2511{
2512 int inv = 0;
2513 int i;
2514
2515 for (i = 0; i < size; i++)
2516 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2517
2518 return inv;
2519}
2520
2521static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2522 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2523{
2524 int i;
2525
2526 for (i = 0; i < sz; i++) {
2527 u32 rqn;
2528
2529 if (rrp.is_rss) {
2530 int ix = i;
2531
2532 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2533 ix = mlx5e_bits_invert(i, ilog2(sz));
2534
bbeb53b8 2535 ix = priv->rss_params.indirection_rqt[ix];
a5f97fee
SM
2536 rqn = rrp.rss.channels->c[ix]->rq.rqn;
2537 } else {
2538 rqn = rrp.rqn;
2539 }
2540 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2541 }
2542}
2543
2544int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2545 struct mlx5e_redirect_rqt_param rrp)
5c50368f
AS
2546{
2547 struct mlx5_core_dev *mdev = priv->mdev;
5c50368f
AS
2548 void *rqtc;
2549 int inlen;
1da36696 2550 u32 *in;
5c50368f
AS
2551 int err;
2552
5c50368f 2553 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1b9a07ee 2554 in = kvzalloc(inlen, GFP_KERNEL);
5c50368f
AS
2555 if (!in)
2556 return -ENOMEM;
2557
2558 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2559
2560 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5c50368f 2561 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
a5f97fee 2562 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
1da36696 2563 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
5c50368f
AS
2564
2565 kvfree(in);
5c50368f
AS
2566 return err;
2567}
2568
a5f97fee
SM
2569static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2570 struct mlx5e_redirect_rqt_param rrp)
2571{
2572 if (!rrp.is_rss)
2573 return rrp.rqn;
2574
2575 if (ix >= rrp.rss.channels->num)
2576 return priv->drop_rq.rqn;
2577
2578 return rrp.rss.channels->c[ix]->rq.rqn;
2579}
2580
2581static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2582 struct mlx5e_redirect_rqt_param rrp)
40ab6a6e 2583{
1da36696
TT
2584 u32 rqtn;
2585 int ix;
2586
398f3351 2587 if (priv->indir_rqt.enabled) {
a5f97fee 2588 /* RSS RQ table */
398f3351 2589 rqtn = priv->indir_rqt.rqtn;
a5f97fee 2590 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
398f3351
HHZ
2591 }
2592
694826e3 2593 for (ix = 0; ix < priv->max_nch; ix++) {
a5f97fee
SM
2594 struct mlx5e_redirect_rqt_param direct_rrp = {
2595 .is_rss = false,
95632791
AM
2596 {
2597 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
2598 },
a5f97fee
SM
2599 };
2600
2601 /* Direct RQ Tables */
398f3351
HHZ
2602 if (!priv->direct_tir[ix].rqt.enabled)
2603 continue;
a5f97fee 2604
398f3351 2605 rqtn = priv->direct_tir[ix].rqt.rqtn;
a5f97fee 2606 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
1da36696 2607 }
40ab6a6e
AS
2608}
2609
a5f97fee
SM
2610static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2611 struct mlx5e_channels *chs)
2612{
2613 struct mlx5e_redirect_rqt_param rrp = {
2614 .is_rss = true,
95632791
AM
2615 {
2616 .rss = {
2617 .channels = chs,
bbeb53b8 2618 .hfunc = priv->rss_params.hfunc,
95632791
AM
2619 }
2620 },
a5f97fee
SM
2621 };
2622
2623 mlx5e_redirect_rqts(priv, rrp);
2624}
2625
2626static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2627{
2628 struct mlx5e_redirect_rqt_param drop_rrp = {
2629 .is_rss = false,
95632791
AM
2630 {
2631 .rqn = priv->drop_rq.rqn,
2632 },
a5f97fee
SM
2633 };
2634
2635 mlx5e_redirect_rqts(priv, drop_rrp);
2636}
2637
d930ac79
AL
2638static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
2639 [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2640 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2641 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2642 },
2643 [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2644 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2645 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2646 },
2647 [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2648 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2649 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2650 },
2651 [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2652 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2653 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2654 },
2655 [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2656 .l4_prot_type = 0,
2657 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2658 },
2659 [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2660 .l4_prot_type = 0,
2661 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2662 },
2663 [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2664 .l4_prot_type = 0,
2665 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2666 },
2667 [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2668 .l4_prot_type = 0,
2669 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2670 },
2671 [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2672 .l4_prot_type = 0,
2673 .rx_hash_fields = MLX5_HASH_IP,
2674 },
2675 [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2676 .l4_prot_type = 0,
2677 .rx_hash_fields = MLX5_HASH_IP,
2678 },
2679};
2680
2681struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
2682{
2683 return tirc_default_config[tt];
2684}
2685
6a9764ef 2686static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
5c50368f 2687{
6a9764ef 2688 if (!params->lro_en)
5c50368f
AS
2689 return;
2690
2691#define ROUGH_MAX_L2_L3_HDR_SZ 256
2692
2693 MLX5_SET(tirc, tirc, lro_enable_mask,
2694 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2695 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2696 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
0b77f230 2697 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
6a9764ef 2698 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
5c50368f
AS
2699}
2700
bbeb53b8 2701void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
d930ac79 2702 const struct mlx5e_tirc_config *ttconfig,
7b3722fa 2703 void *tirc, bool inner)
bdfc028d 2704{
7b3722fa
GP
2705 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2706 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
a100ff3e 2707
bbeb53b8
AL
2708 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
2709 if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
bdfc028d
TT
2710 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2711 rx_hash_toeplitz_key);
2712 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2713 rx_hash_toeplitz_key);
2714
2715 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
bbeb53b8 2716 memcpy(rss_key, rss_params->toeplitz_hash_key, len);
bdfc028d 2717 }
d930ac79
AL
2718 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2719 ttconfig->l3_prot_type);
2720 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2721 ttconfig->l4_prot_type);
2722 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2723 ttconfig->rx_hash_fields);
bdfc028d
TT
2724}
2725
756c4160
AL
2726static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
2727 enum mlx5e_traffic_types tt,
2728 u32 rx_hash_fields)
2729{
2730 *ttconfig = tirc_default_config[tt];
2731 ttconfig->rx_hash_fields = rx_hash_fields;
2732}
2733
e0b4b472 2734void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
080d1b17
AL
2735{
2736 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
756c4160 2737 struct mlx5e_rss_params *rss = &priv->rss_params;
080d1b17
AL
2738 struct mlx5_core_dev *mdev = priv->mdev;
2739 int ctxlen = MLX5_ST_SZ_BYTES(tirc);
756c4160 2740 struct mlx5e_tirc_config ttconfig;
080d1b17
AL
2741 int tt;
2742
2743 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
2744
2745 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2746 memset(tirc, 0, ctxlen);
756c4160
AL
2747 mlx5e_update_rx_hash_fields(&ttconfig, tt,
2748 rss->rx_hash_fields[tt]);
2749 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
e0b4b472 2750 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
080d1b17
AL
2751 }
2752
a16b8e0d
RD
2753 /* Verify inner tirs resources allocated */
2754 if (!priv->inner_indir_tir[0].tirn)
080d1b17
AL
2755 return;
2756
2757 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2758 memset(tirc, 0, ctxlen);
756c4160
AL
2759 mlx5e_update_rx_hash_fields(&ttconfig, tt,
2760 rss->rx_hash_fields[tt]);
2761 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
e0b4b472 2762 mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
080d1b17
AL
2763 }
2764}
2765
ab0394fe 2766static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
5c50368f
AS
2767{
2768 struct mlx5_core_dev *mdev = priv->mdev;
2769
2770 void *in;
2771 void *tirc;
2772 int inlen;
2773 int err;
ab0394fe 2774 int tt;
1da36696 2775 int ix;
5c50368f
AS
2776
2777 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1b9a07ee 2778 in = kvzalloc(inlen, GFP_KERNEL);
5c50368f
AS
2779 if (!in)
2780 return -ENOMEM;
2781
2782 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2783 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2784
6a9764ef 2785 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
5c50368f 2786
1da36696 2787 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
e0b4b472 2788 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
ab0394fe 2789 if (err)
1da36696 2790 goto free_in;
ab0394fe 2791 }
5c50368f 2792
694826e3 2793 for (ix = 0; ix < priv->max_nch; ix++) {
e0b4b472 2794 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
1da36696
TT
2795 if (err)
2796 goto free_in;
2797 }
2798
2799free_in:
5c50368f
AS
2800 kvfree(in);
2801
2802 return err;
2803}
2804
b9ab5d0e
MM
2805static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
2806
472a1e44
TT
2807static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2808 struct mlx5e_params *params, u16 mtu)
40ab6a6e 2809{
472a1e44 2810 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
40ab6a6e
AS
2811 int err;
2812
cd255eff 2813 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
40ab6a6e
AS
2814 if (err)
2815 return err;
2816
cd255eff
SM
2817 /* Update vport context MTU */
2818 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2819 return 0;
2820}
40ab6a6e 2821
472a1e44
TT
2822static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2823 struct mlx5e_params *params, u16 *mtu)
cd255eff 2824{
cd255eff
SM
2825 u16 hw_mtu = 0;
2826 int err;
40ab6a6e 2827
cd255eff
SM
2828 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2829 if (err || !hw_mtu) /* fallback to port oper mtu */
2830 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2831
472a1e44 2832 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
cd255eff
SM
2833}
2834
d9ee0491 2835int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
cd255eff 2836{
472a1e44 2837 struct mlx5e_params *params = &priv->channels.params;
2e20a151 2838 struct net_device *netdev = priv->netdev;
472a1e44 2839 struct mlx5_core_dev *mdev = priv->mdev;
cd255eff
SM
2840 u16 mtu;
2841 int err;
2842
472a1e44 2843 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
cd255eff
SM
2844 if (err)
2845 return err;
40ab6a6e 2846
472a1e44
TT
2847 mlx5e_query_mtu(mdev, params, &mtu);
2848 if (mtu != params->sw_mtu)
cd255eff 2849 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
472a1e44 2850 __func__, mtu, params->sw_mtu);
40ab6a6e 2851
472a1e44 2852 params->sw_mtu = mtu;
40ab6a6e
AS
2853 return 0;
2854}
2855
b9ab5d0e
MM
2856MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
2857
6d7ee2ed
TT
2858void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
2859{
2860 struct mlx5e_params *params = &priv->channels.params;
2861 struct net_device *netdev = priv->netdev;
2862 struct mlx5_core_dev *mdev = priv->mdev;
2863 u16 max_mtu;
2864
2865 /* MTU range: 68 - hw-specific max */
2866 netdev->min_mtu = ETH_MIN_MTU;
2867
2868 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2869 netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
2870 ETH_MAX_MTU);
2871}
2872
fa374877 2873static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
08fb1dac 2874{
08fb1dac
SM
2875 int tc;
2876
2877 netdev_reset_tc(netdev);
2878
2879 if (ntc == 1)
2880 return;
2881
2882 netdev_set_num_tc(netdev, ntc);
2883
7ccdd084
RS
2884 /* Map netdev TCs to offset 0
2885 * We have our own UP to TXQ mapping for QoS
2886 */
08fb1dac 2887 for (tc = 0; tc < ntc; tc++)
7ccdd084 2888 netdev_set_tc_queue(netdev, tc, nch, 0);
08fb1dac
SM
2889}
2890
fa374877 2891static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
c2c95271 2892{
c2c95271 2893 struct net_device *netdev = priv->netdev;
fa374877
MM
2894 int num_txqs, num_rxqs, nch, ntc;
2895 int old_num_txqs, old_ntc;
2896 int err;
2897
2898 old_num_txqs = netdev->real_num_tx_queues;
2899 old_ntc = netdev->num_tc;
c2c95271 2900
fa374877
MM
2901 nch = priv->channels.params.num_channels;
2902 ntc = priv->channels.params.num_tc;
2903 num_txqs = nch * ntc;
2904 num_rxqs = nch * priv->profile->rq_groups;
2905
2906 mlx5e_netdev_set_tcs(netdev, nch, ntc);
2907
2908 err = netif_set_real_num_tx_queues(netdev, num_txqs);
2909 if (err) {
2910 netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
2911 goto err_tcs;
2912 }
2913 err = netif_set_real_num_rx_queues(netdev, num_rxqs);
2914 if (err) {
2915 netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
2916 goto err_txqs;
2917 }
2918
2919 return 0;
2920
2921err_txqs:
2922 /* netif_set_real_num_rx_queues could fail only when nch increased. Only
2923 * one of nch and ntc is changed in this function. That means, the call
2924 * to netif_set_real_num_tx_queues below should not fail, because it
2925 * decreases the number of TX queues.
2926 */
2927 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
2928
2929err_tcs:
2930 mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
2931 return err;
c2c95271
MM
2932}
2933
3909a12e
MM
2934static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
2935 struct mlx5e_params *params)
2936{
2937 struct mlx5_core_dev *mdev = priv->mdev;
2938 int num_comp_vectors, ix, irq;
2939
2940 num_comp_vectors = mlx5_comp_vectors_count(mdev);
2941
2942 for (ix = 0; ix < params->num_channels; ix++) {
2943 cpumask_clear(priv->scratchpad.cpumask);
2944
2945 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
2946 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
2947
2948 cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
2949 }
2950
2951 netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
2952 }
2953}
2954
fe867cac
MM
2955int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
2956{
2957 u16 count = priv->channels.params.num_channels;
fa374877
MM
2958 int err;
2959
2960 err = mlx5e_update_netdev_queues(priv);
2961 if (err)
2962 return err;
fe867cac 2963
3909a12e
MM
2964 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
2965
fe867cac
MM
2966 if (!netif_is_rxfh_configured(priv->netdev))
2967 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
2968 MLX5E_INDIR_RQT_SIZE, count);
2969
2970 return 0;
2971}
2972
b9ab5d0e
MM
2973MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
2974
c55d8b10 2975static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
acc6c595 2976{
c55d8b10 2977 int i, ch;
acc6c595 2978
c55d8b10 2979 ch = priv->channels.num;
8bfaf07f 2980
c55d8b10
EBE
2981 for (i = 0; i < ch; i++) {
2982 int tc;
2983
2984 for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
2985 struct mlx5e_channel *c = priv->channels.c[i];
2986 struct mlx5e_txqsq *sq = &c->sq[tc];
acc6c595 2987
acc6c595 2988 priv->txq2sq[sq->txq_ix] = sq;
c55d8b10 2989 priv->channel_tc2realtxq[i][tc] = i + tc * ch;
acc6c595
SM
2990 }
2991 }
2992}
2993
603f4a45 2994void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
acc6c595 2995{
c55d8b10 2996 mlx5e_build_txq_maps(priv);
acc6c595 2997 mlx5e_activate_channels(&priv->channels);
407e17b1 2998 mlx5e_xdp_tx_enable(priv);
acc6c595 2999 netif_tx_start_all_queues(priv->netdev);
9008ae07 3000
d9ee0491 3001 if (mlx5e_is_vport_rep(priv))
9008ae07
SM
3002 mlx5e_add_sqs_fwd_rules(priv);
3003
acc6c595 3004 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
9008ae07 3005 mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
db05815b
MM
3006
3007 mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
acc6c595
SM
3008}
3009
603f4a45 3010void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
acc6c595 3011{
db05815b
MM
3012 mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
3013
9008ae07
SM
3014 mlx5e_redirect_rqts_to_drop(priv);
3015
d9ee0491 3016 if (mlx5e_is_vport_rep(priv))
9008ae07
SM
3017 mlx5e_remove_sqs_fwd_rules(priv);
3018
acc6c595
SM
3019 /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
3020 * polling for inactive tx queues.
3021 */
3022 netif_tx_stop_all_queues(priv->netdev);
3023 netif_tx_disable(priv->netdev);
407e17b1 3024 mlx5e_xdp_tx_disable(priv);
acc6c595
SM
3025 mlx5e_deactivate_channels(&priv->channels);
3026}
3027
35a78ed4
MM
3028static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
3029 struct mlx5e_channels *new_chs,
b9ab5d0e
MM
3030 mlx5e_fp_preactivate preactivate,
3031 void *context)
55c2503d
SM
3032{
3033 struct net_device *netdev = priv->netdev;
35a78ed4 3034 struct mlx5e_channels old_chs;
7ca42c80 3035 int carrier_ok;
35a78ed4 3036 int err = 0;
877662e2 3037
7ca42c80 3038 carrier_ok = netif_carrier_ok(netdev);
55c2503d
SM
3039 netif_carrier_off(netdev);
3040
55c2503d 3041 mlx5e_deactivate_priv_channels(priv);
55c2503d 3042
35a78ed4 3043 old_chs = priv->channels;
55c2503d
SM
3044 priv->channels = *new_chs;
3045
dca147b3
MM
3046 /* New channels are ready to roll, call the preactivate hook if needed
3047 * to modify HW settings or update kernel parameters.
3048 */
35a78ed4 3049 if (preactivate) {
b9ab5d0e 3050 err = preactivate(priv, context);
35a78ed4
MM
3051 if (err) {
3052 priv->channels = old_chs;
3053 goto out;
3054 }
3055 }
2e20a151 3056
35a78ed4 3057 mlx5e_close_channels(&old_chs);
a90f88fe 3058 priv->profile->update_rx(priv);
35a78ed4
MM
3059
3060out:
55c2503d
SM
3061 mlx5e_activate_priv_channels(priv);
3062
7ca42c80
ES
3063 /* return carrier back if needed */
3064 if (carrier_ok)
3065 netif_carrier_on(netdev);
35a78ed4
MM
3066
3067 return err;
55c2503d
SM
3068}
3069
877662e2
TT
3070int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
3071 struct mlx5e_channels *new_chs,
b9ab5d0e
MM
3072 mlx5e_fp_preactivate preactivate,
3073 void *context)
877662e2
TT
3074{
3075 int err;
3076
3077 err = mlx5e_open_channels(priv, new_chs);
3078 if (err)
3079 return err;
3080
b9ab5d0e 3081 err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
35a78ed4
MM
3082 if (err)
3083 goto err_close;
3084
877662e2 3085 return 0;
35a78ed4
MM
3086
3087err_close:
3088 mlx5e_close_channels(new_chs);
3089
3090 return err;
877662e2
TT
3091}
3092
484c1ada
EBE
3093int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
3094{
3095 struct mlx5e_channels new_channels = {};
3096
3097 new_channels.params = priv->channels.params;
b9ab5d0e 3098 return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
484c1ada
EBE
3099}
3100
237f258c 3101void mlx5e_timestamp_init(struct mlx5e_priv *priv)
7c39afb3
FD
3102{
3103 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
3104 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
3105}
3106
7d0314b1
RD
3107static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
3108 enum mlx5_port_status state)
3109{
3110 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3111 int vport_admin_state;
3112
3113 mlx5_set_port_admin_status(mdev, state);
3114
3115 if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
3116 return;
3117
3118 if (state == MLX5_PORT_UP)
3119 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
3120 else
3121 vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3122
3123 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
3124}
3125
40ab6a6e
AS
3126int mlx5e_open_locked(struct net_device *netdev)
3127{
3128 struct mlx5e_priv *priv = netdev_priv(netdev);
40ab6a6e
AS
3129 int err;
3130
3131 set_bit(MLX5E_STATE_OPENED, &priv->state);
3132
ff9c852f 3133 err = mlx5e_open_channels(priv, &priv->channels);
acc6c595 3134 if (err)
343b29f3 3135 goto err_clear_state_opened_flag;
40ab6a6e 3136
a90f88fe 3137 priv->profile->update_rx(priv);
acc6c595 3138 mlx5e_activate_priv_channels(priv);
7ca42c80
ES
3139 if (priv->profile->update_carrier)
3140 priv->profile->update_carrier(priv);
be4891af 3141
cdeef2b1 3142 mlx5e_queue_update_stats(priv);
9b37b07f 3143 return 0;
343b29f3
AS
3144
3145err_clear_state_opened_flag:
3146 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3147 return err;
40ab6a6e
AS
3148}
3149
cb67b832 3150int mlx5e_open(struct net_device *netdev)
40ab6a6e
AS
3151{
3152 struct mlx5e_priv *priv = netdev_priv(netdev);
3153 int err;
3154
3155 mutex_lock(&priv->state_lock);
3156 err = mlx5e_open_locked(netdev);
63bfd399 3157 if (!err)
7d0314b1 3158 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
40ab6a6e
AS
3159 mutex_unlock(&priv->state_lock);
3160
3161 return err;
3162}
3163
3164int mlx5e_close_locked(struct net_device *netdev)
3165{
3166 struct mlx5e_priv *priv = netdev_priv(netdev);
3167
a1985740
AS
3168 /* May already be CLOSED in case a previous configuration operation
3169 * (e.g RX/TX queue size change) that involves close&open failed.
3170 */
3171 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3172 return 0;
3173
40ab6a6e
AS
3174 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3175
40ab6a6e 3176 netif_carrier_off(priv->netdev);
acc6c595
SM
3177 mlx5e_deactivate_priv_channels(priv);
3178 mlx5e_close_channels(&priv->channels);
40ab6a6e
AS
3179
3180 return 0;
3181}
3182
cb67b832 3183int mlx5e_close(struct net_device *netdev)
40ab6a6e
AS
3184{
3185 struct mlx5e_priv *priv = netdev_priv(netdev);
3186 int err;
3187
26e59d80
MHY
3188 if (!netif_device_present(netdev))
3189 return -ENODEV;
3190
40ab6a6e 3191 mutex_lock(&priv->state_lock);
7d0314b1 3192 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
40ab6a6e
AS
3193 err = mlx5e_close_locked(netdev);
3194 mutex_unlock(&priv->state_lock);
3195
3196 return err;
3197}
3198
a43b25da 3199static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3b77235b
SM
3200 struct mlx5e_rq *rq,
3201 struct mlx5e_rq_param *param)
40ab6a6e 3202{
40ab6a6e
AS
3203 void *rqc = param->rqc;
3204 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3205 int err;
3206
3207 param->wq.db_numa_node = param->wq.buf_numa_node;
3208
99cbfa93
TT
3209 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3210 &rq->wq_ctrl);
40ab6a6e
AS
3211 if (err)
3212 return err;
3213
0ddf5432
JDB
3214 /* Mark as unused given "Drop-RQ" packets never reach XDP */
3215 xdp_rxq_info_unused(&rq->xdp_rxq);
3216
a43b25da 3217 rq->mdev = mdev;
40ab6a6e
AS
3218
3219 return 0;
3220}
3221
a43b25da 3222static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
3b77235b
SM
3223 struct mlx5e_cq *cq,
3224 struct mlx5e_cq_param *param)
40ab6a6e 3225{
7be3412a
PP
3226 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3227 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
2f0db879 3228
95b6c6a5 3229 return mlx5e_alloc_cq_common(mdev, param, cq);
40ab6a6e
AS
3230}
3231
1462e48d
RD
3232int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3233 struct mlx5e_rq *drop_rq)
40ab6a6e 3234{
7cbaf9a3 3235 struct mlx5_core_dev *mdev = priv->mdev;
a43b25da
SM
3236 struct mlx5e_cq_param cq_param = {};
3237 struct mlx5e_rq_param rq_param = {};
3238 struct mlx5e_cq *cq = &drop_rq->cq;
40ab6a6e
AS
3239 int err;
3240
7cbaf9a3 3241 mlx5e_build_drop_rq_param(priv, &rq_param);
40ab6a6e 3242
a43b25da 3243 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
40ab6a6e
AS
3244 if (err)
3245 return err;
3246
3b77235b 3247 err = mlx5e_create_cq(cq, &cq_param);
40ab6a6e 3248 if (err)
3b77235b 3249 goto err_free_cq;
40ab6a6e 3250
a43b25da 3251 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
40ab6a6e 3252 if (err)
3b77235b 3253 goto err_destroy_cq;
40ab6a6e 3254
a43b25da 3255 err = mlx5e_create_rq(drop_rq, &rq_param);
40ab6a6e 3256 if (err)
3b77235b 3257 goto err_free_rq;
40ab6a6e 3258
7cbaf9a3
MS
3259 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3260 if (err)
3261 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3262
40ab6a6e
AS
3263 return 0;
3264
3b77235b 3265err_free_rq:
a43b25da 3266 mlx5e_free_rq(drop_rq);
40ab6a6e
AS
3267
3268err_destroy_cq:
a43b25da 3269 mlx5e_destroy_cq(cq);
40ab6a6e 3270
3b77235b 3271err_free_cq:
a43b25da 3272 mlx5e_free_cq(cq);
3b77235b 3273
40ab6a6e
AS
3274 return err;
3275}
3276
1462e48d 3277void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
40ab6a6e 3278{
a43b25da
SM
3279 mlx5e_destroy_rq(drop_rq);
3280 mlx5e_free_rq(drop_rq);
3281 mlx5e_destroy_cq(&drop_rq->cq);
3282 mlx5e_free_cq(&drop_rq->cq);
40ab6a6e
AS
3283}
3284
2b257a6e 3285int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
40ab6a6e 3286{
40ab6a6e
AS
3287 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3288
b50d292b 3289 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
db60b802 3290
d2ead1f3
TT
3291 if (MLX5_GET(tisc, tisc, tls_en))
3292 MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
3293
db60b802
AH
3294 if (mlx5_lag_is_lacp_owner(mdev))
3295 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3296
e0b4b472 3297 return mlx5_core_create_tis(mdev, in, tisn);
40ab6a6e
AS
3298}
3299
5426a0b2 3300void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
40ab6a6e 3301{
5426a0b2 3302 mlx5_core_destroy_tis(mdev, tisn);
40ab6a6e
AS
3303}
3304
3c145626
TT
3305void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3306{
45f171b1 3307 int tc, i;
3c145626 3308
45f171b1
MM
3309 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3310 for (tc = 0; tc < priv->profile->max_tc; tc++)
3311 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3312}
3313
3314static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3315{
3316 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
3c145626
TT
3317}
3318
cb67b832 3319int mlx5e_create_tises(struct mlx5e_priv *priv)
40ab6a6e 3320{
45f171b1 3321 int tc, i;
40ab6a6e 3322 int err;
40ab6a6e 3323
45f171b1
MM
3324 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3325 for (tc = 0; tc < priv->profile->max_tc; tc++) {
3326 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3327 void *tisc;
2b257a6e 3328
45f171b1 3329 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2b257a6e 3330
45f171b1 3331 MLX5_SET(tisc, tisc, prio, tc << 1);
2b257a6e 3332
45f171b1
MM
3333 if (mlx5e_lag_should_assign_affinity(priv->mdev))
3334 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
3335
3336 err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
3337 if (err)
3338 goto err_close_tises;
3339 }
40ab6a6e
AS
3340 }
3341
3342 return 0;
3343
3344err_close_tises:
45f171b1
MM
3345 for (; i >= 0; i--) {
3346 for (tc--; tc >= 0; tc--)
3347 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3348 tc = priv->profile->max_tc;
3349 }
40ab6a6e
AS
3350
3351 return err;
3352}
3353
d9ee0491 3354static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
40ab6a6e 3355{
3c145626 3356 mlx5e_destroy_tises(priv);
40ab6a6e
AS
3357}
3358
7306c274
TT
3359static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
3360 u32 rqtn, u32 *tirc)
f62b8bb8 3361{
b50d292b 3362 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
7306c274
TT
3363 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3364 MLX5_SET(tirc, tirc, indirect_table, rqtn);
69dad68d
TT
3365 MLX5_SET(tirc, tirc, tunneled_offload_en,
3366 priv->channels.params.tunneled_offload_en);
3191e05f 3367
6a9764ef 3368 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
7306c274 3369}
f62b8bb8 3370
7306c274
TT
3371static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
3372 enum mlx5e_traffic_types tt,
3373 u32 *tirc)
3374{
3375 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
bbeb53b8 3376 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
d930ac79 3377 &tirc_default_config[tt], tirc, false);
f62b8bb8
AV
3378}
3379
6a9764ef 3380static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
f62b8bb8 3381{
7306c274 3382 mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
1da36696
TT
3383 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
3384}
3385
7306c274
TT
3386static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
3387 enum mlx5e_traffic_types tt,
3388 u32 *tirc)
3389{
3390 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3391 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3392 &tirc_default_config[tt], tirc, true);
7306c274
TT
3393}
3394
46dc933c 3395int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
1da36696 3396{
724b2aa1 3397 struct mlx5e_tir *tir;
f62b8bb8
AV
3398 void *tirc;
3399 int inlen;
7b3722fa 3400 int i = 0;
f62b8bb8 3401 int err;
1da36696 3402 u32 *in;
1da36696 3403 int tt;
f62b8bb8
AV
3404
3405 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1b9a07ee 3406 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
3407 if (!in)
3408 return -ENOMEM;
3409
1da36696
TT
3410 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
3411 memset(in, 0, inlen);
724b2aa1 3412 tir = &priv->indir_tir[tt];
1da36696 3413 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
6a9764ef 3414 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
e0b4b472 3415 err = mlx5e_create_tir(priv->mdev, tir, in);
7b3722fa
GP
3416 if (err) {
3417 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
3418 goto err_destroy_inner_tirs;
3419 }
f62b8bb8
AV
3420 }
3421
46dc933c 3422 if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
7b3722fa
GP
3423 goto out;
3424
3425 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
3426 memset(in, 0, inlen);
3427 tir = &priv->inner_indir_tir[i];
3428 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3429 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
e0b4b472 3430 err = mlx5e_create_tir(priv->mdev, tir, in);
7b3722fa
GP
3431 if (err) {
3432 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3433 goto err_destroy_inner_tirs;
3434 }
3435 }
3436
3437out:
6bfd390b
HHZ
3438 kvfree(in);
3439
3440 return 0;
3441
7b3722fa
GP
3442err_destroy_inner_tirs:
3443 for (i--; i >= 0; i--)
3444 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3445
6bfd390b
HHZ
3446 for (tt--; tt >= 0; tt--)
3447 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3448
3449 kvfree(in);
3450
3451 return err;
3452}
3453
db05815b 3454int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
6bfd390b 3455{
6bfd390b
HHZ
3456 struct mlx5e_tir *tir;
3457 void *tirc;
3458 int inlen;
db05815b 3459 int err = 0;
6bfd390b
HHZ
3460 u32 *in;
3461 int ix;
3462
3463 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1b9a07ee 3464 in = kvzalloc(inlen, GFP_KERNEL);
6bfd390b
HHZ
3465 if (!in)
3466 return -ENOMEM;
3467
694826e3 3468 for (ix = 0; ix < priv->max_nch; ix++) {
1da36696 3469 memset(in, 0, inlen);
db05815b 3470 tir = &tirs[ix];
1da36696 3471 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
db05815b 3472 mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
e0b4b472 3473 err = mlx5e_create_tir(priv->mdev, tir, in);
db05815b 3474 if (unlikely(err))
1da36696
TT
3475 goto err_destroy_ch_tirs;
3476 }
3477
db05815b 3478 goto out;
f62b8bb8 3479
1da36696 3480err_destroy_ch_tirs:
db05815b 3481 mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
1da36696 3482 for (ix--; ix >= 0; ix--)
db05815b 3483 mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
1da36696 3484
db05815b 3485out:
1da36696 3486 kvfree(in);
f62b8bb8
AV
3487
3488 return err;
3489}
3490
a16b8e0d 3491void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
f62b8bb8
AV
3492{
3493 int i;
3494
1da36696 3495 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
724b2aa1 3496 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
7b3722fa 3497
a16b8e0d
RD
3498 /* Verify inner tirs resources allocated */
3499 if (!priv->inner_indir_tir[0].tirn)
7b3722fa
GP
3500 return;
3501
3502 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3503 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
f62b8bb8
AV
3504}
3505
db05815b 3506void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
6bfd390b 3507{
6bfd390b
HHZ
3508 int i;
3509
694826e3 3510 for (i = 0; i < priv->max_nch; i++)
db05815b 3511 mlx5e_destroy_tir(priv->mdev, &tirs[i]);
6bfd390b
HHZ
3512}
3513
102722fc
GE
3514static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3515{
3516 int err = 0;
3517 int i;
3518
3519 for (i = 0; i < chs->num; i++) {
3520 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3521 if (err)
3522 return err;
3523 }
3524
3525 return 0;
3526}
3527
f6d96a20 3528static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
36350114
GP
3529{
3530 int err = 0;
3531 int i;
3532
ff9c852f
SM
3533 for (i = 0; i < chs->num; i++) {
3534 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
36350114
GP
3535 if (err)
3536 return err;
3537 }
3538
3539 return 0;
3540}
3541
2f1f5a77 3542static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
0cf0f6d3 3543 struct tc_mqprio_qopt *mqprio)
08fb1dac 3544{
6f9485af 3545 struct mlx5e_channels new_channels = {};
0cf0f6d3 3546 u8 tc = mqprio->num_tc;
08fb1dac
SM
3547 int err = 0;
3548
0cf0f6d3
JP
3549 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3550
08fb1dac
SM
3551 if (tc && tc != MLX5E_MAX_NUM_TC)
3552 return -EINVAL;
3553
3554 mutex_lock(&priv->state_lock);
3555
6f9485af
SM
3556 new_channels.params = priv->channels.params;
3557 new_channels.params.num_tc = tc ? tc : 1;
08fb1dac 3558
20b6a1c7 3559 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
6f9485af
SM
3560 priv->channels.params = new_channels.params;
3561 goto out;
3562 }
08fb1dac 3563
b9ab5d0e
MM
3564 err = mlx5e_safe_switch_channels(priv, &new_channels,
3565 mlx5e_num_channels_changed_ctx, NULL);
6f9485af
SM
3566 if (err)
3567 goto out;
08fb1dac 3568
05909bab
EBE
3569 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3570 new_channels.params.num_tc);
6f9485af 3571out:
08fb1dac 3572 mutex_unlock(&priv->state_lock);
08fb1dac
SM
3573 return err;
3574}
3575
955bcb6e
PNA
3576static LIST_HEAD(mlx5e_block_cb_list);
3577
9afe9a53
OG
3578static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3579 void *type_data)
0cf0f6d3 3580{
4e95bc26
PNA
3581 struct mlx5e_priv *priv = netdev_priv(dev);
3582
2572ac53 3583 switch (type) {
daa664a5
VB
3584 case TC_SETUP_BLOCK: {
3585 struct flow_block_offload *f = type_data;
3586
c9f14470 3587 f->unlocked_driver_cb = true;
955bcb6e
PNA
3588 return flow_block_cb_setup_simple(type_data,
3589 &mlx5e_block_cb_list,
4e95bc26
PNA
3590 mlx5e_setup_tc_block_cb,
3591 priv, priv, true);
daa664a5 3592 }
575ed7d3 3593 case TC_SETUP_QDISC_MQPRIO:
2f1f5a77 3594 return mlx5e_setup_tc_mqprio(priv, type_data);
e8f887ac
AV
3595 default:
3596 return -EOPNOTSUPP;
3597 }
08fb1dac
SM
3598}
3599
b832d4fd 3600void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
9659e49a
SM
3601{
3602 int i;
3603
694826e3 3604 for (i = 0; i < priv->max_nch; i++) {
9659e49a 3605 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
db05815b 3606 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
9659e49a
SM
3607 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3608 int j;
3609
db05815b
MM
3610 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
3611 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
47c97e6b 3612 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
9659e49a
SM
3613
3614 for (j = 0; j < priv->max_opened_tc; j++) {
3615 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3616
3617 s->tx_packets += sq_stats->packets;
3618 s->tx_bytes += sq_stats->bytes;
3619 s->tx_dropped += sq_stats->dropped;
3620 }
3621 }
3622}
3623
d9ee0491 3624void
f62b8bb8
AV
3625mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3626{
3627 struct mlx5e_priv *priv = netdev_priv(dev);
269e6b3a 3628 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
f62b8bb8 3629
dcdf4ce0
ZY
3630 /* In switchdev mode, monitor counters doesn't monitor
3631 * rx/tx stats of 802_3. The update stats mechanism
3632 * should keep the 802_3 layout counters updated
3633 */
3634 if (!mlx5e_monitor_counter_supported(priv) ||
3635 mlx5e_is_uplink_rep(priv)) {
5c7e8bbb
ED
3636 /* update HW stats in background for next time */
3637 mlx5e_queue_update_stats(priv);
3638 }
ed56c519 3639
370bad0f
OG
3640 if (mlx5e_is_uplink_rep(priv)) {
3641 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3642 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3643 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3644 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3645 } else {
9659e49a 3646 mlx5e_fold_sw_stats64(priv, stats);
370bad0f 3647 }
269e6b3a
GP
3648
3649 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
269e6b3a
GP
3650
3651 stats->rx_length_errors =
9218b44d
GP
3652 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3653 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3654 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
269e6b3a 3655 stats->rx_crc_errors =
9218b44d
GP
3656 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3657 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3658 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
269e6b3a
GP
3659 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3660 stats->rx_frame_errors;
3661 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
f62b8bb8
AV
3662}
3663
3664static void mlx5e_set_rx_mode(struct net_device *dev)
3665{
3666 struct mlx5e_priv *priv = netdev_priv(dev);
3667
7bb29755 3668 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
3669}
3670
3671static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3672{
3673 struct mlx5e_priv *priv = netdev_priv(netdev);
3674 struct sockaddr *saddr = addr;
3675
3676 if (!is_valid_ether_addr(saddr->sa_data))
3677 return -EADDRNOTAVAIL;
3678
3679 netif_addr_lock_bh(netdev);
3680 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3681 netif_addr_unlock_bh(netdev);
3682
7bb29755 3683 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
3684
3685 return 0;
3686}
3687
75b81ce7 3688#define MLX5E_SET_FEATURE(features, feature, enable) \
0e405443
GP
3689 do { \
3690 if (enable) \
75b81ce7 3691 *features |= feature; \
0e405443 3692 else \
75b81ce7 3693 *features &= ~feature; \
0e405443
GP
3694 } while (0)
3695
3696typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3697
3698static int set_feature_lro(struct net_device *netdev, bool enable)
f62b8bb8
AV
3699{
3700 struct mlx5e_priv *priv = netdev_priv(netdev);
619a8f2a 3701 struct mlx5_core_dev *mdev = priv->mdev;
2e20a151 3702 struct mlx5e_channels new_channels = {};
619a8f2a 3703 struct mlx5e_params *old_params;
2e20a151
SM
3704 int err = 0;
3705 bool reset;
f62b8bb8
AV
3706
3707 mutex_lock(&priv->state_lock);
f62b8bb8 3708
db05815b
MM
3709 if (enable && priv->xsk.refcnt) {
3710 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
3711 priv->xsk.refcnt);
3712 err = -EINVAL;
3713 goto out;
3714 }
3715
619a8f2a 3716 old_params = &priv->channels.params;
6c3a823e
TT
3717 if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3718 netdev_warn(netdev, "can't set LRO with legacy RQ\n");
3719 err = -EINVAL;
3720 goto out;
3721 }
3722
619a8f2a 3723 reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3724
619a8f2a 3725 new_channels.params = *old_params;
2e20a151
SM
3726 new_channels.params.lro_en = enable;
3727
99cbfa93 3728 if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
db05815b
MM
3729 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
3730 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
619a8f2a
TT
3731 reset = false;
3732 }
3733
2e20a151 3734 if (!reset) {
619a8f2a 3735 *old_params = new_channels.params;
2e20a151
SM
3736 err = mlx5e_modify_tirs_lro(priv);
3737 goto out;
98e81b0a 3738 }
f62b8bb8 3739
b9ab5d0e
MM
3740 err = mlx5e_safe_switch_channels(priv, &new_channels,
3741 mlx5e_modify_tirs_lro_ctx, NULL);
2e20a151 3742out:
9b37b07f 3743 mutex_unlock(&priv->state_lock);
0e405443
GP
3744 return err;
3745}
3746
2b52a283 3747static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
0e405443
GP
3748{
3749 struct mlx5e_priv *priv = netdev_priv(netdev);
3750
3751 if (enable)
2b52a283 3752 mlx5e_enable_cvlan_filter(priv);
0e405443 3753 else
2b52a283 3754 mlx5e_disable_cvlan_filter(priv);
0e405443
GP
3755
3756 return 0;
3757}
3758
d956873f 3759#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
0e405443
GP
3760static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3761{
3762 struct mlx5e_priv *priv = netdev_priv(netdev);
f62b8bb8 3763
226f2ca3 3764 if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
e8f887ac
AV
3765 netdev_err(netdev,
3766 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3767 return -EINVAL;
3768 }
3769
0e405443
GP
3770 return 0;
3771}
077ecd78 3772#endif
0e405443 3773
94cb1ebb
EBE
3774static int set_feature_rx_all(struct net_device *netdev, bool enable)
3775{
3776 struct mlx5e_priv *priv = netdev_priv(netdev);
3777 struct mlx5_core_dev *mdev = priv->mdev;
3778
3779 return mlx5_set_port_fcs(mdev, !enable);
3780}
3781
102722fc
GE
3782static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3783{
3784 struct mlx5e_priv *priv = netdev_priv(netdev);
3785 int err;
3786
3787 mutex_lock(&priv->state_lock);
3788
3789 priv->channels.params.scatter_fcs_en = enable;
3790 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3791 if (err)
3792 priv->channels.params.scatter_fcs_en = !enable;
3793
3794 mutex_unlock(&priv->state_lock);
3795
3796 return err;
3797}
3798
36350114
GP
3799static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3800{
3801 struct mlx5e_priv *priv = netdev_priv(netdev);
ff9c852f 3802 int err = 0;
36350114
GP
3803
3804 mutex_lock(&priv->state_lock);
3805
6a9764ef 3806 priv->channels.params.vlan_strip_disable = !enable;
ff9c852f
SM
3807 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3808 goto unlock;
3809
3810 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
36350114 3811 if (err)
6a9764ef 3812 priv->channels.params.vlan_strip_disable = enable;
36350114 3813
ff9c852f 3814unlock:
36350114
GP
3815 mutex_unlock(&priv->state_lock);
3816
3817 return err;
3818}
3819
ec080045 3820#ifdef CONFIG_MLX5_EN_ARFS
45bf454a
MG
3821static int set_feature_arfs(struct net_device *netdev, bool enable)
3822{
3823 struct mlx5e_priv *priv = netdev_priv(netdev);
3824 int err;
3825
3826 if (enable)
3827 err = mlx5e_arfs_enable(priv);
3828 else
3829 err = mlx5e_arfs_disable(priv);
3830
3831 return err;
3832}
3833#endif
3834
0e405443 3835static int mlx5e_handle_feature(struct net_device *netdev,
75b81ce7 3836 netdev_features_t *features,
0e405443
GP
3837 netdev_features_t wanted_features,
3838 netdev_features_t feature,
3839 mlx5e_feature_handler feature_handler)
3840{
3841 netdev_features_t changes = wanted_features ^ netdev->features;
3842 bool enable = !!(wanted_features & feature);
3843 int err;
3844
3845 if (!(changes & feature))
3846 return 0;
3847
3848 err = feature_handler(netdev, enable);
3849 if (err) {
b20eab15
GP
3850 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3851 enable ? "Enable" : "Disable", &feature, err);
0e405443
GP
3852 return err;
3853 }
3854
75b81ce7 3855 MLX5E_SET_FEATURE(features, feature, enable);
0e405443
GP
3856 return 0;
3857}
3858
d3cbd425 3859int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
0e405443 3860{
75b81ce7 3861 netdev_features_t oper_features = netdev->features;
be0f780b
GP
3862 int err = 0;
3863
3864#define MLX5E_HANDLE_FEATURE(feature, handler) \
3865 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
0e405443 3866
be0f780b
GP
3867 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3868 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
2b52a283 3869 set_feature_cvlan_filter);
d956873f 3870#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
be0f780b 3871 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
077ecd78 3872#endif
be0f780b
GP
3873 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3874 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3875 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
ec080045 3876#ifdef CONFIG_MLX5_EN_ARFS
be0f780b 3877 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
45bf454a 3878#endif
1182f365 3879 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
0e405443 3880
75b81ce7
GP
3881 if (err) {
3882 netdev->features = oper_features;
3883 return -EINVAL;
3884 }
3885
3886 return 0;
f62b8bb8
AV
3887}
3888
7d92d580
GP
3889static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3890 netdev_features_t features)
3891{
3892 struct mlx5e_priv *priv = netdev_priv(netdev);
6c3a823e 3893 struct mlx5e_params *params;
7d92d580
GP
3894
3895 mutex_lock(&priv->state_lock);
6c3a823e 3896 params = &priv->channels.params;
7d92d580
GP
3897 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3898 /* HW strips the outer C-tag header, this is a problem
3899 * for S-tag traffic.
3900 */
3901 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
6c3a823e 3902 if (!params->vlan_strip_disable)
7d92d580
GP
3903 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3904 }
6c3a823e 3905 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
842a2eb2 3906 if (features & NETIF_F_LRO) {
6c3a823e 3907 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
842a2eb2
HN
3908 features &= ~NETIF_F_LRO;
3909 }
6c3a823e
TT
3910 }
3911
c0194e2d
SM
3912 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
3913 features &= ~NETIF_F_RXHASH;
3914 if (netdev->features & NETIF_F_RXHASH)
3915 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
3916 }
3917
7d92d580
GP
3918 mutex_unlock(&priv->state_lock);
3919
3920 return features;
3921}
3922
db05815b
MM
3923static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
3924 struct mlx5e_channels *chs,
3925 struct mlx5e_params *new_params,
3926 struct mlx5_core_dev *mdev)
3927{
3928 u16 ix;
3929
3930 for (ix = 0; ix < chs->params.num_channels; ix++) {
1742b3d5
MK
3931 struct xsk_buff_pool *xsk_pool =
3932 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
db05815b
MM
3933 struct mlx5e_xsk_param xsk;
3934
1742b3d5 3935 if (!xsk_pool)
db05815b
MM
3936 continue;
3937
1742b3d5 3938 mlx5e_build_xsk_param(xsk_pool, &xsk);
db05815b
MM
3939
3940 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
3941 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
3942 int max_mtu_frame, max_mtu_page, max_mtu;
3943
3944 /* Two criteria must be met:
3945 * 1. HW MTU + all headrooms <= XSK frame size.
3946 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
3947 */
3948 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
3949 max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
3950 max_mtu = min(max_mtu_frame, max_mtu_page);
3951
3952 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
3953 new_params->sw_mtu, ix, max_mtu);
3954 return false;
3955 }
3956 }
3957
3958 return true;
3959}
3960
250a42b6 3961int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
b9ab5d0e 3962 mlx5e_fp_preactivate preactivate)
f62b8bb8
AV
3963{
3964 struct mlx5e_priv *priv = netdev_priv(netdev);
2e20a151 3965 struct mlx5e_channels new_channels = {};
472a1e44 3966 struct mlx5e_params *params;
98e81b0a 3967 int err = 0;
506753b0 3968 bool reset;
f62b8bb8 3969
f62b8bb8 3970 mutex_lock(&priv->state_lock);
98e81b0a 3971
472a1e44 3972 params = &priv->channels.params;
506753b0 3973
73281b78 3974 reset = !params->lro_en;
2e20a151 3975 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3976
73281b78
TT
3977 new_channels.params = *params;
3978 new_channels.params.sw_mtu = new_mtu;
579524c6
VT
3979 err = mlx5e_validate_params(priv, &new_channels.params);
3980 if (err)
3981 goto out;
73281b78 3982
a26a5bdf 3983 if (params->xdp_prog &&
db05815b 3984 !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
a26a5bdf 3985 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
a011b49f 3986 new_mtu, mlx5e_xdp_max_mtu(params, NULL));
a26a5bdf
TT
3987 err = -EINVAL;
3988 goto out;
3989 }
3990
db05815b
MM
3991 if (priv->xsk.refcnt &&
3992 !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
3993 &new_channels.params, priv->mdev)) {
a26a5bdf
TT
3994 err = -EINVAL;
3995 goto out;
3996 }
3997
99cbfa93 3998 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
db05815b
MM
3999 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
4000 &new_channels.params,
4001 NULL);
4002 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
4003 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
4004
4005 /* If XSK is active, XSK RQs are linear. */
4006 is_linear |= priv->xsk.refcnt;
73281b78 4007
db05815b 4008 /* Always reset in linear mode - hw_mtu is used in data path. */
0073c8f7 4009 reset = reset && (is_linear || (ppw_old != ppw_new));
73281b78
TT
4010 }
4011
2e20a151 4012 if (!reset) {
472a1e44 4013 params->sw_mtu = new_mtu;
b9ab5d0e
MM
4014 if (preactivate)
4015 preactivate(priv, NULL);
472a1e44 4016 netdev->mtu = params->sw_mtu;
2e20a151
SM
4017 goto out;
4018 }
98e81b0a 4019
b9ab5d0e 4020 err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
472a1e44 4021 if (err)
2e20a151 4022 goto out;
2e20a151 4023
472a1e44 4024 netdev->mtu = new_channels.params.sw_mtu;
f62b8bb8 4025
2e20a151
SM
4026out:
4027 mutex_unlock(&priv->state_lock);
f62b8bb8
AV
4028 return err;
4029}
4030
250a42b6
AN
4031static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
4032{
b9ab5d0e 4033 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
250a42b6
AN
4034}
4035
7c39afb3
FD
4036int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
4037{
4038 struct hwtstamp_config config;
4039 int err;
4040
6dbc80ca
MS
4041 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
4042 (mlx5_clock_get_ptp_index(priv->mdev) == -1))
7c39afb3
FD
4043 return -EOPNOTSUPP;
4044
4045 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4046 return -EFAULT;
4047
4048 /* TX HW timestamp */
4049 switch (config.tx_type) {
4050 case HWTSTAMP_TX_OFF:
4051 case HWTSTAMP_TX_ON:
4052 break;
4053 default:
4054 return -ERANGE;
4055 }
4056
4057 mutex_lock(&priv->state_lock);
4058 /* RX HW timestamp */
4059 switch (config.rx_filter) {
4060 case HWTSTAMP_FILTER_NONE:
4061 /* Reset CQE compression to Admin default */
4062 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
4063 break;
4064 case HWTSTAMP_FILTER_ALL:
4065 case HWTSTAMP_FILTER_SOME:
4066 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4067 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4068 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4069 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4070 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4071 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4072 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4073 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4074 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4075 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4076 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4077 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4078 case HWTSTAMP_FILTER_NTP_ALL:
4079 /* Disable CQE compression */
42401967
SM
4080 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4081 netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
7c39afb3
FD
4082 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
4083 if (err) {
4084 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
4085 mutex_unlock(&priv->state_lock);
4086 return err;
4087 }
4088 config.rx_filter = HWTSTAMP_FILTER_ALL;
4089 break;
4090 default:
4091 mutex_unlock(&priv->state_lock);
4092 return -ERANGE;
4093 }
4094
4095 memcpy(&priv->tstamp, &config, sizeof(config));
4096 mutex_unlock(&priv->state_lock);
4097
c0194e2d
SM
4098 /* might need to fix some features */
4099 netdev_update_features(priv->netdev);
4100
7c39afb3
FD
4101 return copy_to_user(ifr->ifr_data, &config,
4102 sizeof(config)) ? -EFAULT : 0;
4103}
4104
4105int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
4106{
4107 struct hwtstamp_config *cfg = &priv->tstamp;
4108
4109 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4110 return -EOPNOTSUPP;
4111
4112 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
4113}
4114
ef9814de
EBE
4115static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4116{
1170fbd8
FD
4117 struct mlx5e_priv *priv = netdev_priv(dev);
4118
ef9814de
EBE
4119 switch (cmd) {
4120 case SIOCSHWTSTAMP:
1170fbd8 4121 return mlx5e_hwstamp_set(priv, ifr);
ef9814de 4122 case SIOCGHWTSTAMP:
1170fbd8 4123 return mlx5e_hwstamp_get(priv, ifr);
ef9814de
EBE
4124 default:
4125 return -EOPNOTSUPP;
4126 }
4127}
4128
e80541ec 4129#ifdef CONFIG_MLX5_ESWITCH
073caf50 4130int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
66e49ded
SM
4131{
4132 struct mlx5e_priv *priv = netdev_priv(dev);
4133 struct mlx5_core_dev *mdev = priv->mdev;
4134
4135 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4136}
4137
79aab093
MS
4138static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4139 __be16 vlan_proto)
66e49ded
SM
4140{
4141 struct mlx5e_priv *priv = netdev_priv(dev);
4142 struct mlx5_core_dev *mdev = priv->mdev;
4143
79aab093
MS
4144 if (vlan_proto != htons(ETH_P_8021Q))
4145 return -EPROTONOSUPPORT;
4146
66e49ded
SM
4147 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4148 vlan, qos);
4149}
4150
f942380c
MHY
4151static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4152{
4153 struct mlx5e_priv *priv = netdev_priv(dev);
4154 struct mlx5_core_dev *mdev = priv->mdev;
4155
4156 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4157}
4158
1edc57e2
MHY
4159static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4160{
4161 struct mlx5e_priv *priv = netdev_priv(dev);
4162 struct mlx5_core_dev *mdev = priv->mdev;
4163
4164 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4165}
bd77bf1c 4166
073caf50
OG
4167int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4168 int max_tx_rate)
bd77bf1c
MHY
4169{
4170 struct mlx5e_priv *priv = netdev_priv(dev);
4171 struct mlx5_core_dev *mdev = priv->mdev;
4172
bd77bf1c 4173 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
c9497c98 4174 max_tx_rate, min_tx_rate);
bd77bf1c
MHY
4175}
4176
66e49ded
SM
4177static int mlx5_vport_link2ifla(u8 esw_link)
4178{
4179 switch (esw_link) {
cc9c82a8 4180 case MLX5_VPORT_ADMIN_STATE_DOWN:
66e49ded 4181 return IFLA_VF_LINK_STATE_DISABLE;
cc9c82a8 4182 case MLX5_VPORT_ADMIN_STATE_UP:
66e49ded
SM
4183 return IFLA_VF_LINK_STATE_ENABLE;
4184 }
4185 return IFLA_VF_LINK_STATE_AUTO;
4186}
4187
4188static int mlx5_ifla_link2vport(u8 ifla_link)
4189{
4190 switch (ifla_link) {
4191 case IFLA_VF_LINK_STATE_DISABLE:
cc9c82a8 4192 return MLX5_VPORT_ADMIN_STATE_DOWN;
66e49ded 4193 case IFLA_VF_LINK_STATE_ENABLE:
cc9c82a8 4194 return MLX5_VPORT_ADMIN_STATE_UP;
66e49ded 4195 }
cc9c82a8 4196 return MLX5_VPORT_ADMIN_STATE_AUTO;
66e49ded
SM
4197}
4198
4199static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4200 int link_state)
4201{
4202 struct mlx5e_priv *priv = netdev_priv(dev);
4203 struct mlx5_core_dev *mdev = priv->mdev;
4204
4205 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4206 mlx5_ifla_link2vport(link_state));
4207}
4208
073caf50
OG
4209int mlx5e_get_vf_config(struct net_device *dev,
4210 int vf, struct ifla_vf_info *ivi)
66e49ded
SM
4211{
4212 struct mlx5e_priv *priv = netdev_priv(dev);
4213 struct mlx5_core_dev *mdev = priv->mdev;
4214 int err;
4215
4216 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4217 if (err)
4218 return err;
4219 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
4220 return 0;
4221}
4222
073caf50
OG
4223int mlx5e_get_vf_stats(struct net_device *dev,
4224 int vf, struct ifla_vf_stats *vf_stats)
66e49ded
SM
4225{
4226 struct mlx5e_priv *priv = netdev_priv(dev);
4227 struct mlx5_core_dev *mdev = priv->mdev;
4228
4229 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4230 vf_stats);
4231}
e80541ec 4232#endif
66e49ded 4233
3d093bc2
AL
4234static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
4235 struct sk_buff *skb)
4236{
4237 switch (skb->inner_protocol) {
4238 case htons(ETH_P_IP):
4239 case htons(ETH_P_IPV6):
4240 case htons(ETH_P_TEB):
4241 return true;
4242 case htons(ETH_P_MPLS_UC):
4243 case htons(ETH_P_MPLS_MC):
4244 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
4245 }
4246 return false;
4247}
4248
27299841
GP
4249static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4250 struct sk_buff *skb,
4251 netdev_features_t features)
b3f63c3d 4252{
2989ad1e 4253 unsigned int offset = 0;
b3f63c3d 4254 struct udphdr *udph;
27299841
GP
4255 u8 proto;
4256 u16 port;
b3f63c3d
MF
4257
4258 switch (vlan_get_protocol(skb)) {
4259 case htons(ETH_P_IP):
4260 proto = ip_hdr(skb)->protocol;
4261 break;
4262 case htons(ETH_P_IPV6):
2989ad1e 4263 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
b3f63c3d
MF
4264 break;
4265 default:
4266 goto out;
4267 }
4268
27299841
GP
4269 switch (proto) {
4270 case IPPROTO_GRE:
3d093bc2
AL
4271 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
4272 return features;
4273 break;
25948b87
MV
4274 case IPPROTO_IPIP:
4275 case IPPROTO_IPV6:
9c98f7ec
MV
4276 if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
4277 return features;
4278 break;
27299841 4279 case IPPROTO_UDP:
b3f63c3d
MF
4280 udph = udp_hdr(skb);
4281 port = be16_to_cpu(udph->dest);
b3f63c3d 4282
27299841 4283 /* Verify if UDP port is being offloaded by HW */
358aa5ce 4284 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
27299841 4285 return features;
e3cfc7e6
MS
4286
4287#if IS_ENABLED(CONFIG_GENEVE)
4288 /* Support Geneve offload for default UDP port */
4289 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
4290 return features;
4291#endif
27299841 4292 }
b3f63c3d
MF
4293
4294out:
4295 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
4296 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4297}
4298
073caf50
OG
4299netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4300 struct net_device *netdev,
4301 netdev_features_t features)
b3f63c3d
MF
4302{
4303 struct mlx5e_priv *priv = netdev_priv(netdev);
4304
4305 features = vlan_features_check(skb, features);
4306 features = vxlan_features_check(skb, features);
4307
2ac9cfe7
IT
4308#ifdef CONFIG_MLX5_EN_IPSEC
4309 if (mlx5e_ipsec_feature_check(skb, netdev, features))
4310 return features;
4311#endif
4312
b3f63c3d
MF
4313 /* Validate if the tunneled packet is being offloaded by HW */
4314 if (skb->encapsulation &&
4315 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
27299841 4316 return mlx5e_tunnel_features_check(priv, skb, features);
b3f63c3d
MF
4317
4318 return features;
4319}
4320
bfc647d5 4321static void mlx5e_tx_timeout_work(struct work_struct *work)
3947ca18 4322{
bfc647d5
EBE
4323 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4324 tx_timeout_work);
7d91126b 4325 int i;
3947ca18 4326
bfc647d5
EBE
4327 rtnl_lock();
4328 mutex_lock(&priv->state_lock);
4329
4330 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4331 goto unlock;
3947ca18 4332
6a9764ef 4333 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
7d91126b
EBE
4334 struct netdev_queue *dev_queue =
4335 netdev_get_tx_queue(priv->netdev, i);
acc6c595 4336 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3947ca18 4337
84990945 4338 if (!netif_xmit_stopped(dev_queue))
3947ca18 4339 continue;
bfc647d5 4340
06293ae4 4341 if (mlx5e_reporter_tx_timeout(sq))
e6205564
AL
4342 /* break if tried to reopened channels */
4343 break;
3947ca18
DJ
4344 }
4345
bfc647d5
EBE
4346unlock:
4347 mutex_unlock(&priv->state_lock);
4348 rtnl_unlock();
4349}
4350
0290bd29 4351static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
bfc647d5
EBE
4352{
4353 struct mlx5e_priv *priv = netdev_priv(dev);
4354
4355 netdev_err(dev, "TX timeout detected\n");
4356 queue_work(priv->wq, &priv->tx_timeout_work);
3947ca18
DJ
4357}
4358
a26a5bdf 4359static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
0ec13877
TT
4360{
4361 struct net_device *netdev = priv->netdev;
a26a5bdf 4362 struct mlx5e_channels new_channels = {};
0ec13877
TT
4363
4364 if (priv->channels.params.lro_en) {
4365 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4366 return -EINVAL;
4367 }
4368
4369 if (MLX5_IPSEC_DEV(priv->mdev)) {
4370 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
4371 return -EINVAL;
4372 }
4373
a26a5bdf
TT
4374 new_channels.params = priv->channels.params;
4375 new_channels.params.xdp_prog = prog;
4376
db05815b
MM
4377 /* No XSK params: AF_XDP can't be enabled yet at the point of setting
4378 * the XDP program.
4379 */
4380 if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
a26a5bdf 4381 netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
d460c271 4382 new_channels.params.sw_mtu,
a011b49f 4383 mlx5e_xdp_max_mtu(&new_channels.params, NULL));
a26a5bdf
TT
4384 return -EINVAL;
4385 }
4386
0ec13877
TT
4387 return 0;
4388}
4389
fe45386a
MM
4390static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4391{
4392 struct bpf_prog *old_prog;
4393
4394 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4395 lockdep_is_held(&rq->channel->priv->state_lock));
4396 if (old_prog)
4397 bpf_prog_put(old_prog);
4398}
4399
86994156
RS
4400static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4401{
4402 struct mlx5e_priv *priv = netdev_priv(netdev);
4403 struct bpf_prog *old_prog;
86994156 4404 bool reset, was_opened;
96d39502 4405 int err = 0;
86994156
RS
4406 int i;
4407
4408 mutex_lock(&priv->state_lock);
4409
0ec13877 4410 if (prog) {
a26a5bdf 4411 err = mlx5e_xdp_allowed(priv, prog);
0ec13877
TT
4412 if (err)
4413 goto unlock;
547eede0
IT
4414 }
4415
86994156
RS
4416 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4417 /* no need for full reset when exchanging programs */
6a9764ef 4418 reset = (!priv->channels.params.xdp_prog || !prog);
86994156 4419
85192dbf 4420 if (was_opened && !reset)
c54c0629
DB
4421 /* num_channels is invariant here, so we can take the
4422 * batched reference right upfront.
4423 */
85192dbf 4424 bpf_prog_add(prog, priv->channels.num);
86994156 4425
e1895324
MM
4426 if (was_opened && reset) {
4427 struct mlx5e_channels new_channels = {};
4428
4429 new_channels.params = priv->channels.params;
4430 new_channels.params.xdp_prog = prog;
4431 mlx5e_set_rq_type(priv->mdev, &new_channels.params);
4432 old_prog = priv->channels.params.xdp_prog;
4433
b9ab5d0e 4434 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
e1895324
MM
4435 if (err)
4436 goto unlock;
4437 } else {
4438 /* exchange programs, extra prog reference we got from caller
4439 * as long as we don't fail from this point onwards.
4440 */
4441 old_prog = xchg(&priv->channels.params.xdp_prog, prog);
4442 }
4443
86994156
RS
4444 if (old_prog)
4445 bpf_prog_put(old_prog);
4446
e1895324 4447 if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
2a0f561b 4448 mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
86994156 4449
e1895324 4450 if (!was_opened || reset)
86994156
RS
4451 goto unlock;
4452
4453 /* exchanging programs w/o reset, we update ref counts on behalf
4454 * of the channels RQs here.
4455 */
ff9c852f
SM
4456 for (i = 0; i < priv->channels.num; i++) {
4457 struct mlx5e_channel *c = priv->channels.c[i];
86994156 4458
fe45386a
MM
4459 mlx5e_rq_replace_xdp_prog(&c->rq, prog);
4460 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
4461 mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
86994156
RS
4462 }
4463
4464unlock:
4465 mutex_unlock(&priv->state_lock);
4466 return err;
4467}
4468
f4e63525 4469static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
86994156
RS
4470{
4471 switch (xdp->command) {
4472 case XDP_SETUP_PROG:
4473 return mlx5e_xdp_set(dev, xdp->prog);
1742b3d5
MK
4474 case XDP_SETUP_XSK_POOL:
4475 return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
db05815b 4476 xdp->xsk.queue_id);
86994156
RS
4477 default:
4478 return -EINVAL;
4479 }
4480}
4481
4b89251d
HN
4482#ifdef CONFIG_MLX5_ESWITCH
4483static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4484 struct net_device *dev, u32 filter_mask,
4485 int nlflags)
4486{
4487 struct mlx5e_priv *priv = netdev_priv(dev);
4488 struct mlx5_core_dev *mdev = priv->mdev;
4489 u8 mode, setting;
4490 int err;
4491
4492 err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
4493 if (err)
4494 return err;
4495 mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
4496 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4497 mode,
4498 0, 0, nlflags, filter_mask, NULL);
4499}
4500
4501static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4502 u16 flags, struct netlink_ext_ack *extack)
4503{
4504 struct mlx5e_priv *priv = netdev_priv(dev);
4505 struct mlx5_core_dev *mdev = priv->mdev;
4506 struct nlattr *attr, *br_spec;
4507 u16 mode = BRIDGE_MODE_UNDEF;
4508 u8 setting;
4509 int rem;
4510
4511 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4512 if (!br_spec)
4513 return -EINVAL;
4514
4515 nla_for_each_nested(attr, br_spec, rem) {
4516 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4517 continue;
4518
4519 if (nla_len(attr) < sizeof(mode))
4520 return -EINVAL;
4521
4522 mode = nla_get_u16(attr);
4523 if (mode > BRIDGE_MODE_VEPA)
4524 return -EINVAL;
4525
4526 break;
4527 }
4528
4529 if (mode == BRIDGE_MODE_UNDEF)
4530 return -EINVAL;
4531
4532 setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
4533 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
4534}
4535#endif
4536
4d8fcf21 4537const struct net_device_ops mlx5e_netdev_ops = {
f62b8bb8
AV
4538 .ndo_open = mlx5e_open,
4539 .ndo_stop = mlx5e_close,
4540 .ndo_start_xmit = mlx5e_xmit,
0cf0f6d3 4541 .ndo_setup_tc = mlx5e_setup_tc,
08fb1dac 4542 .ndo_select_queue = mlx5e_select_queue,
f62b8bb8
AV
4543 .ndo_get_stats64 = mlx5e_get_stats,
4544 .ndo_set_rx_mode = mlx5e_set_rx_mode,
4545 .ndo_set_mac_address = mlx5e_set_mac,
b0eed40e
SM
4546 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
4547 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
f62b8bb8 4548 .ndo_set_features = mlx5e_set_features,
7d92d580 4549 .ndo_fix_features = mlx5e_fix_features,
250a42b6 4550 .ndo_change_mtu = mlx5e_change_nic_mtu,
b0eed40e 4551 .ndo_do_ioctl = mlx5e_ioctl,
507f0c81 4552 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
18a2b7f9
JK
4553 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
4554 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
706b3583 4555 .ndo_features_check = mlx5e_features_check,
3947ca18 4556 .ndo_tx_timeout = mlx5e_tx_timeout,
f4e63525 4557 .ndo_bpf = mlx5e_xdp,
58b99ee3 4558 .ndo_xdp_xmit = mlx5e_xdp_xmit,
9116e5e2 4559 .ndo_xsk_wakeup = mlx5e_xsk_wakeup,
ec080045
SM
4560#ifdef CONFIG_MLX5_EN_ARFS
4561 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4562#endif
e80541ec 4563#ifdef CONFIG_MLX5_ESWITCH
4b89251d
HN
4564 .ndo_bridge_setlink = mlx5e_bridge_setlink,
4565 .ndo_bridge_getlink = mlx5e_bridge_getlink,
4566
706b3583 4567 /* SRIOV E-Switch NDOs */
b0eed40e
SM
4568 .ndo_set_vf_mac = mlx5e_set_vf_mac,
4569 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
f942380c 4570 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
1edc57e2 4571 .ndo_set_vf_trust = mlx5e_set_vf_trust,
bd77bf1c 4572 .ndo_set_vf_rate = mlx5e_set_vf_rate,
b0eed40e
SM
4573 .ndo_get_vf_config = mlx5e_get_vf_config,
4574 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
4575 .ndo_get_vf_stats = mlx5e_get_vf_stats,
e80541ec 4576#endif
162add8c 4577 .ndo_get_devlink_port = mlx5e_get_devlink_port,
f62b8bb8
AV
4578};
4579
4580static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
4581{
4582 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
9eb78923 4583 return -EOPNOTSUPP;
f62b8bb8
AV
4584 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
4585 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
4586 !MLX5_CAP_ETH(mdev, csum_cap) ||
4587 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
4588 !MLX5_CAP_ETH(mdev, vlan_cap) ||
796a27ec
GP
4589 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
4590 MLX5_CAP_FLOWTABLE(mdev,
4591 flow_table_properties_nic_receive.max_ft_level)
4592 < 3) {
f62b8bb8
AV
4593 mlx5_core_warn(mdev,
4594 "Not creating net device, some required device capabilities are missing\n");
9eb78923 4595 return -EOPNOTSUPP;
f62b8bb8 4596 }
66189961
TT
4597 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
4598 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
7524a5d8 4599 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3e432ab6 4600 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
66189961 4601
f62b8bb8
AV
4602 return 0;
4603}
4604
d4b6c488 4605void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
85082dba
TT
4606 int num_channels)
4607{
4608 int i;
4609
4610 for (i = 0; i < len; i++)
4611 indirection_rqt[i] = i % num_channels;
4612}
4613
0608d4db 4614static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
b797a684 4615{
0608d4db
TT
4616 u32 link_speed = 0;
4617 u32 pci_bw = 0;
b797a684 4618
2c81bfd5 4619 mlx5e_port_max_linkspeed(mdev, &link_speed);
3c0d551e 4620 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
0608d4db
TT
4621 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4622 link_speed, pci_bw);
4623
4624#define MLX5E_SLOW_PCI_RATIO (2)
4625
4626 return link_speed && pci_bw &&
4627 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
0f6e4cf6
EBE
4628}
4629
8960b389 4630static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
0088cbbc 4631{
8960b389 4632 struct dim_cq_moder moder;
cbce4f44
TG
4633
4634 moder.cq_period_mode = cq_period_mode;
4635 moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4636 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4637 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4638 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4639
4640 return moder;
4641}
0088cbbc 4642
8960b389 4643static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
cbce4f44 4644{
8960b389 4645 struct dim_cq_moder moder;
0088cbbc 4646
cbce4f44
TG
4647 moder.cq_period_mode = cq_period_mode;
4648 moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4649 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
0088cbbc 4650 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
cbce4f44
TG
4651 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4652
4653 return moder;
4654}
4655
4656static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4657{
4658 return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
c002bd52
TG
4659 DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4660 DIM_CQ_PERIOD_MODE_START_FROM_EQE;
cbce4f44
TG
4661}
4662
ebeaf084 4663void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
cbce4f44
TG
4664{
4665 if (params->tx_dim_enabled) {
4666 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4667
4668 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4669 } else {
4670 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4671 }
0088cbbc
TG
4672}
4673
ebeaf084 4674void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
9908aa29 4675{
9a317425 4676 if (params->rx_dim_enabled) {
cbce4f44
TG
4677 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4678
4679 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4680 } else {
4681 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
9a317425 4682 }
ebeaf084
TG
4683}
4684
4685void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4686{
4687 mlx5e_reset_tx_moderation(params, cq_period_mode);
4688 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4689 params->tx_cq_moderation.cq_period_mode ==
4690 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4691}
457fcd8a 4692
ebeaf084
TG
4693void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4694{
4695 mlx5e_reset_rx_moderation(params, cq_period_mode);
6a9764ef 4696 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
0088cbbc
TG
4697 params->rx_cq_moderation.cq_period_mode ==
4698 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
9908aa29
TT
4699}
4700
707129dc 4701static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
2b029556
SM
4702{
4703 int i;
4704
4705 /* The supported periods are organized in ascending order */
4706 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4707 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4708 break;
4709
4710 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4711}
4712
749359f4
GT
4713void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
4714 struct mlx5e_params *params)
4715{
4716 /* Prefer Striding RQ, unless any of the following holds:
4717 * - Striding RQ configuration is not possible/supported.
4718 * - Slow PCI heuristic.
4719 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
db05815b
MM
4720 *
4721 * No XSK params: checking the availability of striding RQ in general.
749359f4
GT
4722 */
4723 if (!slow_pci_heuristic(mdev) &&
4724 mlx5e_striding_rq_possible(mdev, params) &&
db05815b
MM
4725 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
4726 !mlx5e_rx_is_linear_skb(params, NULL)))
749359f4
GT
4727 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
4728 mlx5e_set_rq_type(mdev, params);
4729 mlx5e_init_rq_type_params(mdev, params);
4730}
4731
bbeb53b8
AL
4732void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
4733 u16 num_channels)
3edc0159 4734{
756c4160
AL
4735 enum mlx5e_traffic_types tt;
4736
7ee2ace9 4737 rss_params->hfunc = ETH_RSS_HASH_TOP;
bbeb53b8
AL
4738 netdev_rss_key_fill(rss_params->toeplitz_hash_key,
4739 sizeof(rss_params->toeplitz_hash_key));
4740 mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
4741 MLX5E_INDIR_RQT_SIZE, num_channels);
756c4160
AL
4742 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
4743 rss_params->rx_hash_fields[tt] =
4744 tirc_default_config[tt].rx_hash_fields;
3edc0159
GT
4745}
4746
57c7fce1 4747void mlx5e_build_nic_params(struct mlx5e_priv *priv,
db05815b 4748 struct mlx5e_xsk *xsk,
bbeb53b8 4749 struct mlx5e_rss_params *rss_params,
8f493ffd 4750 struct mlx5e_params *params,
57c7fce1 4751 u16 mtu)
f62b8bb8 4752{
57c7fce1 4753 struct mlx5_core_dev *mdev = priv->mdev;
48bfc397 4754 u8 rx_cq_period_mode;
2fc4bfb7 4755
472a1e44
TT
4756 params->sw_mtu = mtu;
4757 params->hard_mtu = MLX5E_ETH_HARD_MTU;
57c7fce1
FL
4758 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
4759 priv->max_nch);
6a9764ef 4760 params->num_tc = 1;
2b029556 4761
6a9764ef
SM
4762 /* SQ */
4763 params->log_sq_size = is_kdump_kernel() ?
b4e029da
KH
4764 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4765 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
5af75c74
MM
4766 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
4767 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
461017cb 4768
6277053a
TT
4769 /* XDP SQ */
4770 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
4771 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
4772
b797a684 4773 /* set CQE compression */
6a9764ef 4774 params->rx_cqe_compress_def = false;
b797a684 4775 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
e53eef63 4776 MLX5_CAP_GEN(mdev, vport_group_manager))
0608d4db 4777 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
0f6e4cf6 4778
6a9764ef 4779 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
b856df28 4780 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
6a9764ef
SM
4781
4782 /* RQ */
749359f4 4783 mlx5e_build_rq_params(mdev, params);
b797a684 4784
6a9764ef 4785 /* HW LRO */
02377e6e
TT
4786 if (MLX5_CAP_ETH(mdev, lro_cap) &&
4787 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
db05815b
MM
4788 /* No XSK params: checking the availability of striding RQ in general. */
4789 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
619a8f2a 4790 params->lro_en = !slow_pci_heuristic(mdev);
db05815b 4791 }
6a9764ef 4792 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
b0d4660b 4793
6a9764ef 4794 /* CQ moderation params */
48bfc397 4795 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
6a9764ef
SM
4796 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4797 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
9a317425 4798 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
cbce4f44 4799 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
48bfc397
TG
4800 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4801 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
9908aa29 4802
6a9764ef 4803 /* TX inline */
b431302e 4804 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
a6f402e4 4805
6a9764ef 4806 /* RSS */
bbeb53b8 4807 mlx5e_build_rss_params(rss_params, params->num_channels);
69dad68d
TT
4808 params->tunneled_offload_en =
4809 mlx5e_tunnel_inner_ft_supported(mdev);
db05815b
MM
4810
4811 /* AF_XDP */
4812 params->xsk = xsk;
6a9764ef 4813}
f62b8bb8 4814
f62b8bb8
AV
4815static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4816{
4817 struct mlx5e_priv *priv = netdev_priv(netdev);
4818
e1d974d0 4819 mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
108805fc
SM
4820 if (is_zero_ether_addr(netdev->dev_addr) &&
4821 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4822 eth_hw_addr_random(netdev);
4823 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4824 }
f62b8bb8
AV
4825}
4826
18a2b7f9
JK
4827static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
4828 unsigned int entry, struct udp_tunnel_info *ti)
4829{
4830 struct mlx5e_priv *priv = netdev_priv(netdev);
4831
4832 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
4833}
4834
4835static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4836 unsigned int entry, struct udp_tunnel_info *ti)
4837{
4838 struct mlx5e_priv *priv = netdev_priv(netdev);
4839
4840 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
4841}
4842
4843void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
4844{
4845 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4846 return;
4847
4848 priv->nic_info.set_port = mlx5e_vxlan_set_port;
4849 priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
4850 priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
4851 UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
4852 priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
4853 /* Don't count the space hard-coded to the IANA port */
4854 priv->nic_info.tables[0].n_entries =
4855 mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
4856
4857 priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
4858}
4859
6bfd390b 4860static void mlx5e_build_nic_netdev(struct net_device *netdev)
f62b8bb8
AV
4861{
4862 struct mlx5e_priv *priv = netdev_priv(netdev);
4863 struct mlx5_core_dev *mdev = priv->mdev;
94cb1ebb
EBE
4864 bool fcs_supported;
4865 bool fcs_enabled;
f62b8bb8 4866
c42260f1 4867 SET_NETDEV_DEV(netdev, mdev->device);
f62b8bb8 4868
e80541ec
SM
4869 netdev->netdev_ops = &mlx5e_netdev_ops;
4870
3f3ab178 4871 mlx5e_dcbnl_build_netdev(netdev);
66e49ded 4872
f62b8bb8
AV
4873 netdev->watchdog_timeo = 15 * HZ;
4874
4875 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4876
12be4b21 4877 netdev->vlan_features |= NETIF_F_SG;
e4683f35 4878 netdev->vlan_features |= NETIF_F_HW_CSUM;
f62b8bb8
AV
4879 netdev->vlan_features |= NETIF_F_GRO;
4880 netdev->vlan_features |= NETIF_F_TSO;
4881 netdev->vlan_features |= NETIF_F_TSO6;
4882 netdev->vlan_features |= NETIF_F_RXCSUM;
4883 netdev->vlan_features |= NETIF_F_RXHASH;
4884
5dc9520b
AL
4885 netdev->mpls_features |= NETIF_F_SG;
4886 netdev->mpls_features |= NETIF_F_HW_CSUM;
4887 netdev->mpls_features |= NETIF_F_TSO;
4888 netdev->mpls_features |= NETIF_F_TSO6;
4889
71186172
AH
4890 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
4891 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
4892
6c3a823e
TT
4893 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4894 mlx5e_check_fragmented_striding_rq_cap(mdev))
f62b8bb8
AV
4895 netdev->vlan_features |= NETIF_F_LRO;
4896
4897 netdev->hw_features = netdev->vlan_features;
e4cf27bd 4898 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
f62b8bb8
AV
4899 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4900 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4382c7b9 4901 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
f62b8bb8 4902
18a2b7f9
JK
4903 mlx5e_vxlan_set_netdev_info(priv);
4904
e3cfc7e6 4905 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
e3a53bc5 4906 mlx5e_any_tunnel_proto_supported(mdev)) {
e4683f35 4907 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
b3f63c3d
MF
4908 netdev->hw_enc_features |= NETIF_F_TSO;
4909 netdev->hw_enc_features |= NETIF_F_TSO6;
27299841
GP
4910 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4911 }
4912
e3cfc7e6 4913 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
27299841
GP
4914 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4915 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4916 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4917 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b49663c8 4918 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
a1718505
DC
4919 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
4920 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b3f63c3d
MF
4921 }
4922
e3a53bc5 4923 if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
27299841
GP
4924 netdev->hw_features |= NETIF_F_GSO_GRE |
4925 NETIF_F_GSO_GRE_CSUM;
4926 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4927 NETIF_F_GSO_GRE_CSUM;
4928 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4929 NETIF_F_GSO_GRE_CSUM;
4930 }
4931
25948b87
MV
4932 if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
4933 netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
4934 NETIF_F_GSO_IPXIP6;
4935 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
4936 NETIF_F_GSO_IPXIP6;
4937 netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
4938 NETIF_F_GSO_IPXIP6;
4939 }
4940
3f44899e
BP
4941 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
4942 netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
4943 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
4944 netdev->features |= NETIF_F_GSO_UDP_L4;
4945
94cb1ebb
EBE
4946 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4947
4948 if (fcs_supported)
4949 netdev->hw_features |= NETIF_F_RXALL;
4950
102722fc
GE
4951 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4952 netdev->hw_features |= NETIF_F_RXFCS;
4953
f62b8bb8 4954 netdev->features = netdev->hw_features;
6a9764ef 4955 if (!priv->channels.params.lro_en)
f62b8bb8
AV
4956 netdev->features &= ~NETIF_F_LRO;
4957
94cb1ebb
EBE
4958 if (fcs_enabled)
4959 netdev->features &= ~NETIF_F_RXALL;
4960
102722fc
GE
4961 if (!priv->channels.params.scatter_fcs_en)
4962 netdev->features &= ~NETIF_F_RXFCS;
4963
c0194e2d
SM
4964 /* prefere CQE compression over rxhash */
4965 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4966 netdev->features &= ~NETIF_F_RXHASH;
4967
e8f887ac
AV
4968#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4969 if (FT_CAP(flow_modify_en) &&
4970 FT_CAP(modify_root) &&
4971 FT_CAP(identified_miss_table_mode) &&
1cabe6b0 4972 FT_CAP(flow_table_modify)) {
077ecd78 4973#ifdef CONFIG_MLX5_ESWITCH
1cabe6b0 4974 netdev->hw_features |= NETIF_F_HW_TC;
077ecd78 4975#endif
ec080045 4976#ifdef CONFIG_MLX5_EN_ARFS
1cabe6b0
MG
4977 netdev->hw_features |= NETIF_F_NTUPLE;
4978#endif
4979 }
e8f887ac 4980
f62b8bb8 4981 netdev->features |= NETIF_F_HIGHDMA;
7d92d580 4982 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
f62b8bb8
AV
4983
4984 netdev->priv_flags |= IFF_UNICAST_FLT;
4985
4986 mlx5e_set_netdev_dev_addr(netdev);
547eede0 4987 mlx5e_ipsec_build_netdev(priv);
c83294b9 4988 mlx5e_tls_build_netdev(priv);
f62b8bb8
AV
4989}
4990
1462e48d 4991void mlx5e_create_q_counters(struct mlx5e_priv *priv)
593cf338 4992{
66247fbb
LR
4993 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
4994 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
593cf338
RS
4995 struct mlx5_core_dev *mdev = priv->mdev;
4996 int err;
4997
66247fbb
LR
4998 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
4999 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5000 if (!err)
5001 priv->q_counter =
5002 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
7cbaf9a3 5003
66247fbb
LR
5004 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5005 if (!err)
5006 priv->drop_rq_q_counter =
5007 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
593cf338
RS
5008}
5009
1462e48d 5010void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
593cf338 5011{
66247fbb
LR
5012 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
5013
5014 MLX5_SET(dealloc_q_counter_in, in, opcode,
5015 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
5016 if (priv->q_counter) {
5017 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5018 priv->q_counter);
5019 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5020 }
593cf338 5021
66247fbb
LR
5022 if (priv->drop_rq_q_counter) {
5023 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5024 priv->drop_rq_q_counter);
5025 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5026 }
593cf338
RS
5027}
5028
182570b2
FD
5029static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
5030 struct net_device *netdev,
5031 const struct mlx5e_profile *profile,
5032 void *ppriv)
6bfd390b
HHZ
5033{
5034 struct mlx5e_priv *priv = netdev_priv(netdev);
bbeb53b8 5035 struct mlx5e_rss_params *rss = &priv->rss_params;
547eede0 5036 int err;
6bfd390b 5037
519a0bf5 5038 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
182570b2
FD
5039 if (err)
5040 return err;
5041
57c7fce1
FL
5042 mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
5043 netdev->mtu);
519a0bf5
SM
5044
5045 mlx5e_timestamp_init(priv);
5046
547eede0
IT
5047 err = mlx5e_ipsec_init(priv);
5048 if (err)
5049 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
43585a41
IL
5050 err = mlx5e_tls_init(priv);
5051 if (err)
5052 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
6bfd390b 5053 mlx5e_build_nic_netdev(netdev);
4d54d325
VT
5054 err = mlx5e_devlink_port_register(priv);
5055 if (err)
5056 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
11af6a6d 5057 mlx5e_health_create_reporters(priv);
182570b2
FD
5058
5059 return 0;
6bfd390b
HHZ
5060}
5061
5062static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
5063{
11af6a6d 5064 mlx5e_health_destroy_reporters(priv);
4d54d325 5065 mlx5e_devlink_port_unregister(priv);
43585a41 5066 mlx5e_tls_cleanup(priv);
547eede0 5067 mlx5e_ipsec_cleanup(priv);
182570b2 5068 mlx5e_netdev_cleanup(priv->netdev, priv);
6bfd390b
HHZ
5069}
5070
5071static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
5072{
5073 struct mlx5_core_dev *mdev = priv->mdev;
5074 int err;
6bfd390b 5075
1462e48d
RD
5076 mlx5e_create_q_counters(priv);
5077
5078 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
5079 if (err) {
5080 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
5081 goto err_destroy_q_counters;
5082 }
5083
8f493ffd
SM
5084 err = mlx5e_create_indirect_rqt(priv);
5085 if (err)
1462e48d 5086 goto err_close_drop_rq;
6bfd390b 5087
db05815b 5088 err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
8f493ffd 5089 if (err)
6bfd390b 5090 goto err_destroy_indirect_rqts;
6bfd390b 5091
46dc933c 5092 err = mlx5e_create_indirect_tirs(priv, true);
8f493ffd 5093 if (err)
6bfd390b 5094 goto err_destroy_direct_rqts;
6bfd390b 5095
db05815b 5096 err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
8f493ffd 5097 if (err)
6bfd390b 5098 goto err_destroy_indirect_tirs;
6bfd390b 5099
db05815b
MM
5100 err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
5101 if (unlikely(err))
5102 goto err_destroy_direct_tirs;
5103
5104 err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
5105 if (unlikely(err))
5106 goto err_destroy_xsk_rqts;
5107
6bfd390b
HHZ
5108 err = mlx5e_create_flow_steering(priv);
5109 if (err) {
5110 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
db05815b 5111 goto err_destroy_xsk_tirs;
6bfd390b
HHZ
5112 }
5113
655dc3d2 5114 err = mlx5e_tc_nic_init(priv);
6bfd390b
HHZ
5115 if (err)
5116 goto err_destroy_flow_steering;
5117
1182f365
TT
5118 err = mlx5e_accel_init_rx(priv);
5119 if (err)
5120 goto err_tc_nic_cleanup;
5121
f4aebbfb
AL
5122#ifdef CONFIG_MLX5_EN_ARFS
5123 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
5124#endif
5125
6bfd390b
HHZ
5126 return 0;
5127
1182f365
TT
5128err_tc_nic_cleanup:
5129 mlx5e_tc_nic_cleanup(priv);
6bfd390b
HHZ
5130err_destroy_flow_steering:
5131 mlx5e_destroy_flow_steering(priv);
db05815b
MM
5132err_destroy_xsk_tirs:
5133 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
5134err_destroy_xsk_rqts:
5135 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
6bfd390b 5136err_destroy_direct_tirs:
db05815b 5137 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
6bfd390b 5138err_destroy_indirect_tirs:
a16b8e0d 5139 mlx5e_destroy_indirect_tirs(priv);
6bfd390b 5140err_destroy_direct_rqts:
db05815b 5141 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
6bfd390b
HHZ
5142err_destroy_indirect_rqts:
5143 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1462e48d
RD
5144err_close_drop_rq:
5145 mlx5e_close_drop_rq(&priv->drop_rq);
5146err_destroy_q_counters:
5147 mlx5e_destroy_q_counters(priv);
6bfd390b
HHZ
5148 return err;
5149}
5150
5151static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
5152{
1182f365 5153 mlx5e_accel_cleanup_rx(priv);
655dc3d2 5154 mlx5e_tc_nic_cleanup(priv);
6bfd390b 5155 mlx5e_destroy_flow_steering(priv);
db05815b
MM
5156 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
5157 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
5158 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
a16b8e0d 5159 mlx5e_destroy_indirect_tirs(priv);
db05815b 5160 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
6bfd390b 5161 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1462e48d
RD
5162 mlx5e_close_drop_rq(&priv->drop_rq);
5163 mlx5e_destroy_q_counters(priv);
6bfd390b
HHZ
5164}
5165
5166static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
5167{
5168 int err;
5169
5170 err = mlx5e_create_tises(priv);
5171 if (err) {
5172 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
5173 return err;
5174 }
5175
e207b7e9 5176 mlx5e_dcbnl_initialize(priv);
6bfd390b
HHZ
5177 return 0;
5178}
5179
5180static void mlx5e_nic_enable(struct mlx5e_priv *priv)
5181{
5182 struct net_device *netdev = priv->netdev;
5183 struct mlx5_core_dev *mdev = priv->mdev;
2c3b5bee
SM
5184
5185 mlx5e_init_l2_addr(priv);
5186
63bfd399
EBE
5187 /* Marking the link as currently not needed by the Driver */
5188 if (!netif_running(netdev))
7d0314b1 5189 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
63bfd399 5190
6d7ee2ed 5191 mlx5e_set_netdev_mtu_boundaries(priv);
2c3b5bee 5192 mlx5e_set_dev_port_mtu(priv);
6bfd390b 5193
7907f23a
AH
5194 mlx5_lag_add(mdev, netdev);
5195
6bfd390b 5196 mlx5e_enable_async_events(priv);
5c7e8bbb
ED
5197 if (mlx5e_monitor_counter_supported(priv))
5198 mlx5e_monitor_counter_init(priv);
127ea380 5199
cef35af3 5200 mlx5e_hv_vhca_stats_create(priv);
610e89e0
SM
5201 if (netdev->reg_state != NETREG_REGISTERED)
5202 return;
2a5e7a13 5203 mlx5e_dcbnl_init_app(priv);
610e89e0
SM
5204
5205 queue_work(priv->wq, &priv->set_rx_mode_work);
2c3b5bee
SM
5206
5207 rtnl_lock();
5208 if (netif_running(netdev))
5209 mlx5e_open(netdev);
18a2b7f9 5210 udp_tunnel_nic_reset_ntf(priv->netdev);
2c3b5bee
SM
5211 netif_device_attach(netdev);
5212 rtnl_unlock();
6bfd390b
HHZ
5213}
5214
5215static void mlx5e_nic_disable(struct mlx5e_priv *priv)
5216{
3deef8ce 5217 struct mlx5_core_dev *mdev = priv->mdev;
3deef8ce 5218
2a5e7a13
HN
5219 if (priv->netdev->reg_state == NETREG_REGISTERED)
5220 mlx5e_dcbnl_delete_app(priv);
2a5e7a13 5221
2c3b5bee
SM
5222 rtnl_lock();
5223 if (netif_running(priv->netdev))
5224 mlx5e_close(priv->netdev);
5225 netif_device_detach(priv->netdev);
5226 rtnl_unlock();
5227
6bfd390b 5228 queue_work(priv->wq, &priv->set_rx_mode_work);
1d447a39 5229
cef35af3 5230 mlx5e_hv_vhca_stats_destroy(priv);
5c7e8bbb
ED
5231 if (mlx5e_monitor_counter_supported(priv))
5232 mlx5e_monitor_counter_cleanup(priv);
5233
6bfd390b 5234 mlx5e_disable_async_events(priv);
e387f7d5 5235 mlx5_lag_remove(mdev);
c5eb51ad 5236 mlx5_vxlan_reset_to_default(mdev->vxlan);
6bfd390b
HHZ
5237}
5238
a90f88fe
GT
5239int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
5240{
80639b19 5241 return mlx5e_refresh_tirs(priv, false, false);
a90f88fe
GT
5242}
5243
6bfd390b
HHZ
5244static const struct mlx5e_profile mlx5e_nic_profile = {
5245 .init = mlx5e_nic_init,
5246 .cleanup = mlx5e_nic_cleanup,
5247 .init_rx = mlx5e_init_nic_rx,
5248 .cleanup_rx = mlx5e_cleanup_nic_rx,
5249 .init_tx = mlx5e_init_nic_tx,
5250 .cleanup_tx = mlx5e_cleanup_nic_tx,
5251 .enable = mlx5e_nic_enable,
5252 .disable = mlx5e_nic_disable,
a90f88fe 5253 .update_rx = mlx5e_update_nic_rx,
b521105b 5254 .update_stats = mlx5e_stats_update_ndo_stats,
7ca42c80 5255 .update_carrier = mlx5e_update_carrier,
5adf4c47 5256 .rx_handlers = &mlx5e_rx_handlers_nic,
6bfd390b 5257 .max_tc = MLX5E_MAX_NUM_TC,
694826e3 5258 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
3460c184
SM
5259 .stats_grps = mlx5e_nic_stats_grps,
5260 .stats_grps_num = mlx5e_nic_stats_grps_num,
6bfd390b
HHZ
5261};
5262
2c3b5bee
SM
5263/* mlx5e generic netdev management API (move to en_common.c) */
5264
182570b2 5265/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
519a0bf5
SM
5266int mlx5e_netdev_init(struct net_device *netdev,
5267 struct mlx5e_priv *priv,
5268 struct mlx5_core_dev *mdev,
5269 const struct mlx5e_profile *profile,
5270 void *ppriv)
182570b2 5271{
519a0bf5
SM
5272 /* priv init */
5273 priv->mdev = mdev;
5274 priv->netdev = netdev;
5275 priv->profile = profile;
5276 priv->ppriv = ppriv;
5277 priv->msglevel = MLX5E_MSG_LEVEL;
694826e3 5278 priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
519a0bf5 5279 priv->max_opened_tc = 1;
182570b2 5280
3909a12e
MM
5281 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
5282 return -ENOMEM;
5283
519a0bf5
SM
5284 mutex_init(&priv->state_lock);
5285 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
5286 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
5287 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
cdeef2b1 5288 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
303211b4 5289
182570b2
FD
5290 priv->wq = create_singlethread_workqueue("mlx5e");
5291 if (!priv->wq)
3909a12e 5292 goto err_free_cpumask;
182570b2 5293
519a0bf5
SM
5294 /* netdev init */
5295 netif_carrier_off(netdev);
5296
182570b2 5297 return 0;
3909a12e
MM
5298
5299err_free_cpumask:
5300 free_cpumask_var(priv->scratchpad.cpumask);
5301
5302 return -ENOMEM;
182570b2
FD
5303}
5304
5305void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
5306{
5307 destroy_workqueue(priv->wq);
3909a12e 5308 free_cpumask_var(priv->scratchpad.cpumask);
182570b2
FD
5309}
5310
26e59d80
MHY
5311struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
5312 const struct mlx5e_profile *profile,
779d986d 5313 int nch,
26e59d80 5314 void *ppriv)
f62b8bb8
AV
5315{
5316 struct net_device *netdev;
182570b2 5317 int err;
f62b8bb8 5318
08fb1dac 5319 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 5320 nch * profile->max_tc,
694826e3 5321 nch * profile->rq_groups);
f62b8bb8
AV
5322 if (!netdev) {
5323 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5324 return NULL;
5325 }
5326
182570b2
FD
5327 err = profile->init(mdev, netdev, profile, ppriv);
5328 if (err) {
5329 mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
5330 goto err_free_netdev;
5331 }
26e59d80
MHY
5332
5333 return netdev;
5334
182570b2 5335err_free_netdev:
26e59d80
MHY
5336 free_netdev(netdev);
5337
5338 return NULL;
5339}
5340
2c3b5bee 5341int mlx5e_attach_netdev(struct mlx5e_priv *priv)
26e59d80 5342{
3909a12e 5343 const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
26e59d80 5344 const struct mlx5e_profile *profile;
a1f240f1 5345 int max_nch;
26e59d80
MHY
5346 int err;
5347
26e59d80
MHY
5348 profile = priv->profile;
5349 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 5350
a1f240f1
YA
5351 /* max number of channels may have changed */
5352 max_nch = mlx5e_get_max_num_channels(priv->mdev);
5353 if (priv->channels.params.num_channels > max_nch) {
5354 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
3909a12e
MM
5355 /* Reducing the number of channels - RXFH has to be reset, and
5356 * mlx5e_num_channels_changed below will build the RQT.
5357 */
fe867cac 5358 priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
a1f240f1 5359 priv->channels.params.num_channels = max_nch;
a1f240f1 5360 }
3909a12e
MM
5361 /* 1. Set the real number of queues in the kernel the first time.
5362 * 2. Set our default XPS cpumask.
5363 * 3. Build the RQT.
5364 *
5365 * rtnl_lock is required by netif_set_real_num_*_queues in case the
5366 * netdev has been registered by this point (if this function was called
5367 * in the reload or resume flow).
5368 */
5369 if (take_rtnl)
5370 rtnl_lock();
fa374877 5371 err = mlx5e_num_channels_changed(priv);
3909a12e
MM
5372 if (take_rtnl)
5373 rtnl_unlock();
fa374877
MM
5374 if (err)
5375 goto out;
a1f240f1 5376
6bfd390b
HHZ
5377 err = profile->init_tx(priv);
5378 if (err)
ec8b9981 5379 goto out;
5c50368f 5380
6bfd390b
HHZ
5381 err = profile->init_rx(priv);
5382 if (err)
1462e48d 5383 goto err_cleanup_tx;
5c50368f 5384
6bfd390b
HHZ
5385 if (profile->enable)
5386 profile->enable(priv);
f62b8bb8 5387
26e59d80 5388 return 0;
5c50368f 5389
1462e48d 5390err_cleanup_tx:
6bfd390b 5391 profile->cleanup_tx(priv);
5c50368f 5392
26e59d80 5393out:
5cd39b6e
AL
5394 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
5395 cancel_work_sync(&priv->update_stats_work);
26e59d80 5396 return err;
f62b8bb8
AV
5397}
5398
2c3b5bee 5399void mlx5e_detach_netdev(struct mlx5e_priv *priv)
26e59d80 5400{
26e59d80
MHY
5401 const struct mlx5e_profile *profile = priv->profile;
5402
5403 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80 5404
37f304d1
SM
5405 if (profile->disable)
5406 profile->disable(priv);
5407 flush_workqueue(priv->wq);
5408
26e59d80 5409 profile->cleanup_rx(priv);
26e59d80 5410 profile->cleanup_tx(priv);
cdeef2b1 5411 cancel_work_sync(&priv->update_stats_work);
26e59d80
MHY
5412}
5413
2c3b5bee
SM
5414void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
5415{
5416 const struct mlx5e_profile *profile = priv->profile;
5417 struct net_device *netdev = priv->netdev;
5418
2c3b5bee
SM
5419 if (profile->cleanup)
5420 profile->cleanup(priv);
5421 free_netdev(netdev);
5422}
5423
26e59d80
MHY
5424/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
5425 * hardware contexts and to connect it to the current netdev.
5426 */
5427static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
5428{
5429 struct mlx5e_priv *priv = vpriv;
5430 struct net_device *netdev = priv->netdev;
5431 int err;
5432
5433 if (netif_device_present(netdev))
5434 return 0;
5435
5436 err = mlx5e_create_mdev_resources(mdev);
5437 if (err)
5438 return err;
5439
2c3b5bee 5440 err = mlx5e_attach_netdev(priv);
26e59d80
MHY
5441 if (err) {
5442 mlx5e_destroy_mdev_resources(mdev);
5443 return err;
5444 }
5445
5446 return 0;
5447}
5448
5449static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
5450{
5451 struct mlx5e_priv *priv = vpriv;
5452 struct net_device *netdev = priv->netdev;
5453
47c9d2c9
AH
5454#ifdef CONFIG_MLX5_ESWITCH
5455 if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
5456 return;
5457#endif
5458
26e59d80
MHY
5459 if (!netif_device_present(netdev))
5460 return;
5461
2c3b5bee 5462 mlx5e_detach_netdev(priv);
26e59d80
MHY
5463 mlx5e_destroy_mdev_resources(mdev);
5464}
5465
b50d292b
HHZ
5466static void *mlx5e_add(struct mlx5_core_dev *mdev)
5467{
07c9f1e5 5468 struct net_device *netdev;
26e59d80 5469 void *priv;
26e59d80 5470 int err;
779d986d 5471 int nch;
b50d292b 5472
26e59d80
MHY
5473 err = mlx5e_check_required_hca_cap(mdev);
5474 if (err)
b50d292b
HHZ
5475 return NULL;
5476
aec002f6
OG
5477#ifdef CONFIG_MLX5_ESWITCH
5478 if (MLX5_ESWITCH_MANAGER(mdev) &&
f6455de0 5479 mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
aec002f6
OG
5480 mlx5e_rep_register_vport_reps(mdev);
5481 return mdev;
5482 }
5483#endif
5484
779d986d 5485 nch = mlx5e_get_max_num_channels(mdev);
13e509a4 5486 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
26e59d80
MHY
5487 if (!netdev) {
5488 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
13e509a4 5489 return NULL;
26e59d80
MHY
5490 }
5491
71c6eaeb 5492 dev_net_set(netdev, mlx5_core_net(mdev));
26e59d80
MHY
5493 priv = netdev_priv(netdev);
5494
5495 err = mlx5e_attach(mdev, priv);
5496 if (err) {
5497 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
5498 goto err_destroy_netdev;
5499 }
5500
31e87b39 5501 err = register_netdev(netdev);
c6acd629 5502 if (err) {
31e87b39 5503 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4d54d325 5504 goto err_detach;
c6acd629
VT
5505 }
5506
31e87b39
VT
5507 mlx5e_devlink_port_type_eth_set(priv);
5508
2a5e7a13 5509 mlx5e_dcbnl_init_app(priv);
26e59d80
MHY
5510 return priv;
5511
5512err_detach:
5513 mlx5e_detach(mdev, priv);
26e59d80 5514err_destroy_netdev:
2c3b5bee 5515 mlx5e_destroy_netdev(priv);
26e59d80 5516 return NULL;
b50d292b
HHZ
5517}
5518
b50d292b
HHZ
5519static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
5520{
aec002f6 5521 struct mlx5e_priv *priv;
127ea380 5522
aec002f6
OG
5523#ifdef CONFIG_MLX5_ESWITCH
5524 if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
5525 mlx5e_rep_unregister_vport_reps(mdev);
5526 return;
5527 }
5528#endif
5529 priv = vpriv;
2a5e7a13 5530 mlx5e_dcbnl_delete_app(priv);
5e1e93c7 5531 unregister_netdev(priv->netdev);
26e59d80 5532 mlx5e_detach(mdev, vpriv);
2c3b5bee 5533 mlx5e_destroy_netdev(priv);
b50d292b
HHZ
5534}
5535
f62b8bb8 5536static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
5537 .add = mlx5e_add,
5538 .remove = mlx5e_remove,
26e59d80
MHY
5539 .attach = mlx5e_attach,
5540 .detach = mlx5e_detach,
f62b8bb8 5541 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
f62b8bb8
AV
5542};
5543
5544void mlx5e_init(void)
5545{
2ac9cfe7 5546 mlx5e_ipsec_build_inverse_table();
665bc539 5547 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
5548 mlx5_register_interface(&mlx5e_interface);
5549}
5550
5551void mlx5e_cleanup(void)
5552{
5553 mlx5_unregister_interface(&mlx5e_interface);
5554}