/* drivers/net/ethernet/mellanox/mlx5/core/en_rx.c */
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"

static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
        .handle_rx_cqe = mlx5e_handle_rx_cqe,
        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
};

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
        return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
                                       u32 cqcc, void *data)
{
        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

        memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
                                         struct mlx5_cqwq *wq,
                                         u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_cqe64 *title = &cqd->title;

        mlx5e_read_cqe_slot(wq, cqcc, title);
        cqd->left = be32_to_cpu(title->byte_cnt);
        cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
        rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
                                            struct mlx5e_cq_decomp *cqd,
                                            u32 cqcc)
{
        mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
        cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
        u32 cqcc = wq->cc;
        u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
        u32 wq_sz = mlx5_cqwq_get_size(wq);
        u32 ci_top = min_t(u32, wq_sz, ci + n);

        for (; ci < ci_top; ci++, n--) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                cqe->op_own = op_own;
        }

        if (unlikely(ci == wq_sz)) {
                op_own = !op_own;
                for (ci = 0; ci < n; ci++) {
                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                        cqe->op_own = op_own;
                }
        }
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
                                        struct mlx5_cqwq *wq,
                                        u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
        struct mlx5_cqe64 *title = &cqd->title;

        title->byte_cnt = mini_cqe->byte_cnt;
        title->check_sum = mini_cqe->checksum;
        title->op_own &= 0xf0;
        title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
        title->wqe_counter = cpu_to_be16(cqd->wqe_counter);

        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
        else
                cqd->wqe_counter =
                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
                                                struct mlx5_cqwq *wq,
                                                u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;

        mlx5e_decompress_cqe(rq, wq, cqcc);
        cqd->title.rss_hash_type = 0;
        cqd->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
                                             struct mlx5_cqwq *wq,
                                             int update_owner_only,
                                             int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cqcc = wq->cc + update_owner_only;
        u32 cqe_count;
        u32 i;

        cqe_count = min_t(u32, cqd->left, budget_rem);

        for (i = update_owner_only; i < cqe_count;
             i++, cqd->mini_arr_idx++, cqcc++) {
                if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
                INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
                                mlx5e_handle_rx_cqe, rq, &cqd->title);
        }
        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
        wq->cc = cqcc;
        cqd->left -= cqe_count;
        rq->stats->cqe_compress_pkts += cqe_count;

        return cqe_count;
}

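/* CQE compression: the HW can emit a "title" CQE followed by blocks of 8-byte
 * mini CQEs. The title carries the fields common to the whole block, while
 * each mini CQE supplies the per-packet byte count and checksum.
 * mlx5e_decompress_cqes_start()/_cont() expand every mini CQE back into
 * cqd->title, hand it to the regular RX handler, and then fix up the owner
 * bits of the CQ slots that were consumed.
 */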
static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
                                              struct mlx5_cqwq *wq,
                                              int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cc = wq->cc;

        mlx5e_read_title_slot(rq, wq, cc);
        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
        mlx5e_decompress_cqe(rq, wq, cc);
        INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
                        mlx5e_handle_rx_cqe, rq, &cqd->title);
        cqd->mini_arr_idx++;

        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}

static inline bool mlx5e_page_is_reserved(struct page *page)
{
        return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
}

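/* rq->page_cache is a small ring of recently used RX pages.
 * mlx5e_rx_cache_put() refuses pages that are pfmemalloc or off-node, and
 * mlx5e_rx_cache_get() only reuses a page once its refcount shows no other
 * users; everything else falls back to the page pool.
 */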
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
        struct mlx5e_rq_stats *stats = rq->stats;

        if (tail_next == cache->head) {
                stats->cache_full++;
                return false;
        }

        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
                stats->cache_waive++;
                return false;
        }

        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        struct mlx5e_rq_stats *stats = rq->stats;

        if (unlikely(cache->head == cache->tail)) {
                stats->cache_empty++;
                return false;
        }

        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
                stats->cache_busy++;
                return false;
        }

        *dma_info = cache->page_cache[cache->head];
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
        stats->cache_reuse++;

        dma_sync_single_for_device(rq->pdev, dma_info->addr,
                                   PAGE_SIZE,
                                   DMA_FROM_DEVICE);
        return true;
}

static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
                                        struct mlx5e_dma_info *dma_info)
{
        if (mlx5e_rx_cache_get(rq, dma_info))
                return 0;

        dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
        if (unlikely(!dma_info->page))
                return -ENOMEM;

        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
                                      PAGE_SIZE, rq->buff.map_dir);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
                dma_info->page = NULL;
                return -ENOMEM;
        }

        return 0;
}

static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
                                   struct mlx5e_dma_info *dma_info)
{
        if (rq->umem)
                return mlx5e_xsk_page_alloc_umem(rq, dma_info);
        else
                return mlx5e_page_alloc_pool(rq, dma_info);
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
        dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}

void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
                                struct mlx5e_dma_info *dma_info,
                                bool recycle)
{
        if (likely(recycle)) {
                if (mlx5e_rx_cache_put(rq, dma_info))
                        return;

                mlx5e_page_dma_unmap(rq, dma_info);
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
        } else {
                mlx5e_page_dma_unmap(rq, dma_info);
                page_pool_release_page(rq->page_pool, dma_info->page);
                put_page(dma_info->page);
        }
}

static inline void mlx5e_page_release(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info,
                                      bool recycle)
{
        if (rq->umem)
                /* The `recycle` parameter is ignored, and the page is always
                 * put into the Reuse Ring, because there is no way to return
                 * the page to the userspace when the interface goes down.
                 */
                xsk_buff_free(dma_info->xsk);
        else
                mlx5e_page_release_dynamic(rq, dma_info, recycle);
}

static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
                                    struct mlx5e_wqe_frag_info *frag)
{
        int err = 0;

        if (!frag->offset)
                /* On first frag (offset == 0), replenish page (dma_info actually).
                 * Other frags that point to the same dma_info (with a different
                 * offset) should just use the new one without replenishing again
                 * by themselves.
                 */
                err = mlx5e_page_alloc(rq, frag->di);

        return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *frag,
                                     bool recycle)
{
        if (frag->last_in_page)
                mlx5e_page_release(rq, frag->di, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
                              u16 ix)
{
        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
        int err;
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;

                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
                                                frag->offset + rq->buff.headroom);
        }

        return 0;

free_frags:
        while (--i >= 0)
                mlx5e_put_rx_frag(rq, --frag, true);

        return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *wi,
                                     bool recycle)
{
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
                mlx5e_put_rx_frag(rq, wi, recycle);
}

static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

        mlx5e_free_rx_wqe(rq, wi, false);
}
397
069d1146 398static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
6cd392a0 399{
069d1146
TT
400 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
401 int err;
402 int i;
6cd392a0 403
db05815b
MM
404 if (rq->umem) {
405 int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
406
39d6443c
BT
407 /* Check in advance that we have enough frames, instead of
408 * allocating one-by-one, failing and moving frames to the
409 * Reuse Ring.
410 */
411 if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
db05815b
MM
412 return -ENOMEM;
413 }
414
069d1146
TT
415 for (i = 0; i < wqe_bulk; i++) {
416 struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
417
418 err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
419 if (unlikely(err))
420 goto free_wqes;
421 }
422
423 return 0;
424
425free_wqes:
426 while (--i >= 0)
427 mlx5e_dealloc_rx_wqe(rq, ix + i);
428
429 return err;
6cd392a0
DJ
430}
431
fa698366
TT
432static inline void
433mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
434 struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
435 unsigned int truesize)
bc77b240 436{
d9d9f156 437 dma_sync_single_for_cpu(rq->pdev,
9f9e9cd5 438 di->addr + frag_offset,
bc77b240 439 len, DMA_FROM_DEVICE);
9f9e9cd5 440 page_ref_inc(di->page);
bc77b240 441 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
9f9e9cd5 442 di->page, frag_offset, len, truesize);
bc77b240
TT
443}
444
386471f1
TT
445static inline void
446mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
447 struct mlx5e_dma_info *dma_info,
94816278 448 int offset_from, u32 headlen)
386471f1
TT
449{
450 const void *from = page_address(dma_info->page) + offset_from;
451 /* Aligning len to sizeof(long) optimizes memcpy performance */
452 unsigned int len = ALIGN(headlen, sizeof(long));
453
454 dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
455 DMA_FROM_DEVICE);
94816278 456 skb_copy_to_linear_data(skb, from, len);
bc77b240
TT
457}
458
cb5189d1
TT
459static void
460mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
bc77b240 461{
db05815b 462 bool no_xdp_xmit;
22f45398 463 struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
18187fb2 464 int i;
bc77b240 465
db05815b
MM
466 /* A common case for AF_XDP. */
467 if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
468 return;
469
470 no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
471 MLX5_MPWRQ_PAGES_PER_WQE);
472
9f9e9cd5 473 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
22f45398 474 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
cb5189d1 475 mlx5e_page_release(rq, &dma_info[i], recycle);
18187fb2 476}
bc77b240 477
fd9b4be8 478static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
18187fb2 479{
422d4c40 480 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
7e426671 481
fd9b4be8
TT
482 do {
483 u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
18187fb2 484
fd9b4be8
TT
485 mlx5_wq_ll_push(wq, next_wqe_index);
486 } while (--n);
18187fb2
TT
487
488 /* ensure wqes are visible to device before updating doorbell record */
489 dma_wmb();
490
491 mlx5_wq_ll_update_db_record(wq);
bc77b240
TT
492}
493
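/* Populate one multi-packet WQE: allocate MLX5_MPWRQ_PAGES_PER_WQE pages,
 * write their addresses into the inline MTTs of a UMR WQE, and post that UMR
 * on the channel's ICOSQ so the HCA maps the pages before the RQ uses them.
 */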
18187fb2 494static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bc77b240 495{
21c59685 496 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
4c2af5cc 497 struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
18187fb2
TT
498 struct mlx5e_icosq *sq = &rq->channel->icosq;
499 struct mlx5_wq_cyc *wq = &sq->wq;
ea3886ca 500 struct mlx5e_umr_wqe *umr_wqe;
b8a98a4c 501 u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
ec9cdca0 502 u16 pi;
7e426671 503 int err;
bc77b240
TT
504 int i;
505
39d6443c
BT
506 /* Check in advance that we have enough frames, instead of allocating
507 * one-by-one, failing and moving frames to the Reuse Ring.
508 */
db05815b 509 if (rq->umem &&
39d6443c 510 unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
db05815b
MM
511 err = -ENOMEM;
512 goto err;
513 }
514
ec9cdca0 515 pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
ea3886ca 516 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
ed084fb6 517 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
ab966d7e 518
4c2af5cc 519 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
db05815b 520 err = mlx5e_page_alloc(rq, dma_info);
7e426671 521 if (unlikely(err))
bc77b240 522 goto err_unmap;
ea3886ca 523 umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
bc77b240
TT
524 }
525
22f45398 526 bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
bc77b240 527 wi->consumed_strides = 0;
bc77b240 528
ea3886ca 529 umr_wqe->ctrl.opmod_idx_opcode =
18187fb2
TT
530 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
531 MLX5_OPCODE_UMR);
b8a98a4c 532 umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
18187fb2 533
41a8e4eb 534 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
28bff095 535 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
41a8e4eb
TT
536 .num_wqebbs = MLX5E_UMR_WQEBBS,
537 .umr.rq = rq,
538 };
539
ea3886ca 540 sq->pc += MLX5E_UMR_WQEBBS;
fd9b4be8
TT
541
542 sq->doorbell_cseg = &umr_wqe->ctrl;
18187fb2 543
bc77b240
TT
544 return 0;
545
546err_unmap:
547 while (--i >= 0) {
4c2af5cc 548 dma_info--;
4415a031 549 mlx5e_page_release(rq, dma_info, true);
bc77b240 550 }
db05815b
MM
551
552err:
05909bab 553 rq->stats->buff_alloc_err++;
bc77b240 554
7e426671 555 return err;
bc77b240
TT
556}
557
5adf4c47 558static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
6cd392a0 559{
21c59685 560 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
cb5189d1
TT
561 /* Don't recycle, this function is called on rq/netdev close */
562 mlx5e_free_rx_mpwqe(rq, wi, false);
6cd392a0
DJ
563}
564
5d0b8476 565INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
e586b3b0 566{
99cbfa93 567 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
069d1146 568 u8 wqe_bulk;
4b7dfc99 569 int err;
e586b3b0 570
0e5c04f6 571 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
e586b3b0
AV
572 return false;
573
069d1146
TT
574 wqe_bulk = rq->wqe.info.wqe_bulk;
575
576 if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
4b7dfc99
TT
577 return false;
578
4b7dfc99 579 do {
99cbfa93 580 u16 head = mlx5_wq_cyc_get_head(wq);
e586b3b0 581
069d1146 582 err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
54984407 583 if (unlikely(err)) {
05909bab 584 rq->stats->buff_alloc_err++;
e586b3b0 585 break;
54984407 586 }
e586b3b0 587
069d1146
TT
588 mlx5_wq_cyc_push_n(wq, wqe_bulk);
589 } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
e586b3b0
AV
590
591 /* ensure wqes are visible to device before updating doorbell record */
592 dma_wmb();
593
99cbfa93 594 mlx5_wq_cyc_update_db_record(wq);
e586b3b0 595
4b7dfc99 596 return !!err;
e586b3b0
AV
597}
598
1182f365
TT
599void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
600{
601 u16 sqcc;
602
603 sqcc = sq->cc;
604
605 while (sqcc != sq->pc) {
606 struct mlx5e_icosq_wqe_info *wi;
607 u16 ci;
608
609 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
610 wi = &sq->db.wqe_info[ci];
611 sqcc += wi->num_wqebbs;
612#ifdef CONFIG_MLX5_EN_TLS
613 switch (wi->wqe_type) {
614 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
615 mlx5e_ktls_handle_ctx_completion(wi);
616 break;
0419d8c9
TT
617 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
618 mlx5e_ktls_handle_get_psv_completion(wi, sq);
619 break;
1182f365
TT
620 }
621#endif
622 }
623 sq->cc = sqcc;
624}
625
e7e0004a 626int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
7cc6d77b
TT
627{
628 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
629 struct mlx5_cqe64 *cqe;
fd9b4be8
TT
630 u16 sqcc;
631 int i;
7cc6d77b 632
0e5c04f6 633 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
e7e0004a 634 return 0;
7cc6d77b
TT
635
636 cqe = mlx5_cqwq_get_cqe(&cq->wq);
637 if (likely(!cqe))
e7e0004a 638 return 0;
7cc6d77b 639
fd9b4be8
TT
640 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
641 * otherwise a cq overrun may occur
642 */
643 sqcc = sq->cc;
644
645 i = 0;
646 do {
647 u16 wqe_counter;
648 bool last_wqe;
649
650 mlx5_cqwq_pop(&cq->wq);
651
652 wqe_counter = be16_to_cpu(cqe->wqe_counter);
653
fd9b4be8 654 do {
7d42c8e9 655 struct mlx5e_icosq_wqe_info *wi;
fd9b4be8
TT
656 u16 ci;
657
658 last_wqe = (sqcc == wqe_counter);
659
660 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
7d42c8e9 661 wi = &sq->db.wqe_info[ci];
1de0306c 662 sqcc += wi->num_wqebbs;
fd9b4be8 663
b57e66ad
TT
664 if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
665 netdev_WARN_ONCE(cq->channel->netdev,
666 "Bad OP in ICOSQ CQE: 0x%x\n",
667 get_cqe_opcode(cqe));
f1b95753
TT
668 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
669 (struct mlx5_err_cqe *)cqe);
b57e66ad
TT
670 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
671 queue_work(cq->channel->priv->wq, &sq->recover_work);
672 break;
673 }
674
28bff095
TT
675 switch (wi->wqe_type) {
676 case MLX5E_ICOSQ_WQE_UMR_RX:
ed084fb6 677 wi->umr.rq->mpwqe.umr_completed++;
28bff095
TT
678 break;
679 case MLX5E_ICOSQ_WQE_NOP:
680 break;
1182f365
TT
681#ifdef CONFIG_MLX5_EN_TLS
682 case MLX5E_ICOSQ_WQE_UMR_TLS:
683 break;
684 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
685 mlx5e_ktls_handle_ctx_completion(wi);
686 break;
0419d8c9
TT
687 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
688 mlx5e_ktls_handle_get_psv_completion(wi, sq);
689 break;
1182f365 690#endif
28bff095 691 default:
fd9b4be8 692 netdev_WARN_ONCE(cq->channel->netdev,
28bff095
TT
693 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
694 wi->wqe_type);
695 }
fd9b4be8 696 } while (!last_wqe);
fd9b4be8
TT
697 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
698
699 sq->cc = sqcc;
7cc6d77b
TT
700
701 mlx5_cqwq_update_db_record(&cq->wq);
e7e0004a
MM
702
703 return i;
7cc6d77b
TT
704}
705
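/* Refill the striding RQ: account UMRs completed by the ICOSQ, then issue new
 * UMR WQEs (in bulks of at least UMR_WQE_BULK) for the missing multi-packet
 * WQEs and ring the ICOSQ doorbell once for the whole batch.
 */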
5d0b8476 706INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
7cc6d77b 707{
fd9b4be8 708 struct mlx5e_icosq *sq = &rq->channel->icosq;
422d4c40 709 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
ed084fb6 710 u8 umr_completed = rq->mpwqe.umr_completed;
db05815b 711 int alloc_err = 0;
fd9b4be8
TT
712 u8 missing, i;
713 u16 head;
7cc6d77b 714
0e5c04f6 715 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
7cc6d77b
TT
716 return false;
717
ed084fb6
MM
718 if (umr_completed) {
719 mlx5e_post_rx_mpwqe(rq, umr_completed);
720 rq->mpwqe.umr_in_progress -= umr_completed;
721 rq->mpwqe.umr_completed = 0;
722 }
fd9b4be8
TT
723
724 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
7cc6d77b 725
fd9b4be8
TT
726 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
727 rq->stats->congst_umr++;
728
729#define UMR_WQE_BULK (2)
730 if (likely(missing < UMR_WQE_BULK))
7cc6d77b
TT
731 return false;
732
fd9b4be8
TT
733 head = rq->mpwqe.actual_wq_head;
734 i = missing;
735 do {
db05815b
MM
736 alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
737
738 if (unlikely(alloc_err))
fd9b4be8
TT
739 break;
740 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
741 } while (--i);
742
743 rq->mpwqe.umr_last_bulk = missing - i;
744 if (sq->doorbell_cseg) {
745 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
746 sq->doorbell_cseg = NULL;
747 }
748
749 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
750 rq->mpwqe.actual_wq_head = head;
7cc6d77b 751
a7bd4018
MM
752 /* If XSK Fill Ring doesn't have enough frames, report the error, so
753 * that one of the actions can be performed:
754 * 1. If need_wakeup is used, signal that the application has to kick
755 * the driver when it refills the Fill Ring.
756 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
db05815b
MM
757 */
758 if (unlikely(alloc_err == -ENOMEM && rq->umem))
759 return true;
760
e4d86a4a 761 return false;
7cc6d77b
TT
762}
763
8babd44d
GP
764static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
765{
766 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
767 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
768 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
769
770 tcp->check = 0;
771 tcp->psh = get_cqe_lro_tcppsh(cqe);
772
773 if (tcp_ack) {
774 tcp->ack = 1;
775 tcp->ack_seq = cqe->lro_ack_seq_num;
776 tcp->window = cqe->lro_tcp_win;
777 }
778}
779
461017cb
TT
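/* For an LRO session the HW delivers one coalesced frame, so the IP and TCP
 * headers must be rewritten in software: total length, TTL/hop limit, TCP
 * PSH/ACK bits and window, plus freshly computed IP and TCP checksums.
 */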
780static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
781 u32 cqe_bcnt)
e586b3b0 782{
cd17d230 783 struct ethhdr *eth = (struct ethhdr *)(skb->data);
e586b3b0 784 struct tcphdr *tcp;
cd17d230 785 int network_depth = 0;
8babd44d 786 __wsum check;
cd17d230
GP
787 __be16 proto;
788 u16 tot_len;
604acb19 789 void *ip_p;
e586b3b0 790
cd17d230 791 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
e586b3b0 792
cd17d230 793 tot_len = cqe_bcnt - network_depth;
604acb19 794 ip_p = skb->data + network_depth;
cd17d230
GP
795
796 if (proto == htons(ETH_P_IP)) {
604acb19 797 struct iphdr *ipv4 = ip_p;
e586b3b0 798
604acb19
TT
799 tcp = ip_p + sizeof(struct iphdr);
800 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
e586b3b0 801
e586b3b0
AV
802 ipv4->ttl = cqe->lro_min_ttl;
803 ipv4->tot_len = cpu_to_be16(tot_len);
804 ipv4->check = 0;
805 ipv4->check = ip_fast_csum((unsigned char *)ipv4,
806 ipv4->ihl);
8babd44d
GP
807
808 mlx5e_lro_update_tcp_hdr(cqe, tcp);
809 check = csum_partial(tcp, tcp->doff * 4,
810 csum_unfold((__force __sum16)cqe->check_sum));
811 /* Almost done, don't forget the pseudo header */
812 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
813 tot_len - sizeof(struct iphdr),
814 IPPROTO_TCP, check);
e586b3b0 815 } else {
8babd44d 816 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
604acb19
TT
817 struct ipv6hdr *ipv6 = ip_p;
818
819 tcp = ip_p + sizeof(struct ipv6hdr);
820 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
821
e586b3b0 822 ipv6->hop_limit = cqe->lro_min_ttl;
8babd44d
GP
823 ipv6->payload_len = cpu_to_be16(payload_len);
824
825 mlx5e_lro_update_tcp_hdr(cqe, tcp);
826 check = csum_partial(tcp, tcp->doff * 4,
827 csum_unfold((__force __sum16)cqe->check_sum));
828 /* Almost done, don't forget the pseudo header */
829 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
830 IPPROTO_TCP, check);
604acb19 831 }
e586b3b0
AV
832}
833
834static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
835 struct sk_buff *skb)
836{
837 u8 cht = cqe->rss_hash_type;
838 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
839 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
840 PKT_HASH_TYPE_NONE;
841 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
842}
843
f007c13d
NS
844static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
845 __be16 *proto)
bbceefce 846{
f007c13d
NS
847 *proto = ((struct ethhdr *)skb->data)->h_proto;
848 *proto = __vlan_get_protocol(skb, *proto, network_depth);
0318a7b7
SM
849
850 if (*proto == htons(ETH_P_IP))
851 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
852
853 if (*proto == htons(ETH_P_IPV6))
854 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
855
856 return false;
f007c13d
NS
857}
858
859static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
860{
861 int network_depth = 0;
862 __be16 proto;
863 void *ip;
864 int rc;
bbceefce 865
f007c13d
NS
866 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
867 return;
868
869 ip = skb->data + network_depth;
870 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
871 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
872
873 rq->stats->ecn_mark += !!rc;
bbceefce
AS
874}
875
ef6fcd45 876static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
fe1dc069 877{
ef6fcd45 878 void *ip_p = skb->data + network_depth;
fe1dc069
AH
879
880 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
881 ((struct ipv6hdr *)ip_p)->nexthdr;
882}
883
e8c8b53c
CW
884#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
885
0aa1d186
SM
886#define MAX_PADDING 8
887
888static void
889tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
890 struct mlx5e_rq_stats *stats)
891{
892 stats->csum_complete_tail_slow++;
893 skb->csum = csum_block_add(skb->csum,
894 skb_checksum(skb, offset, len, 0),
895 offset);
896}
897
898static void
899tail_padding_csum(struct sk_buff *skb, int offset,
900 struct mlx5e_rq_stats *stats)
901{
902 u8 tail_padding[MAX_PADDING];
903 int len = skb->len - offset;
904 void *tail;
905
906 if (unlikely(len > MAX_PADDING)) {
907 tail_padding_csum_slow(skb, offset, len, stats);
908 return;
909 }
910
911 tail = skb_header_pointer(skb, offset, len, tail_padding);
912 if (unlikely(!tail)) {
913 tail_padding_csum_slow(skb, offset, len, stats);
914 return;
915 }
916
917 stats->csum_complete_tail++;
918 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
919}
920
921static void
8c7698d5
SM
922mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
923 struct mlx5e_rq_stats *stats)
0aa1d186
SM
924{
925 struct ipv6hdr *ip6;
926 struct iphdr *ip4;
927 int pkt_len;
928
8c7698d5
SM
929 /* Fixup vlan headers, if any */
930 if (network_depth > ETH_HLEN)
931 /* CQE csum is calculated from the IP header and does
932 * not cover VLAN headers (if present). This will add
933 * the checksum manually.
934 */
935 skb->csum = csum_partial(skb->data + ETH_HLEN,
936 network_depth - ETH_HLEN,
937 skb->csum);
938
939 /* Fixup tail padding, if any */
0aa1d186
SM
940 switch (proto) {
941 case htons(ETH_P_IP):
942 ip4 = (struct iphdr *)(skb->data + network_depth);
943 pkt_len = network_depth + ntohs(ip4->tot_len);
944 break;
945 case htons(ETH_P_IPV6):
946 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
947 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
948 break;
949 default:
950 return;
951 }
952
953 if (likely(pkt_len >= skb->len))
954 return;
955
956 tail_padding_csum(skb, pkt_len, stats);
957}
958
bbceefce
AS
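/* Checksum strategy: prefer CHECKSUM_COMPLETE based on the CQE checksum
 * (with fixups for VLAN headers and tail padding), fall back to
 * CHECKSUM_UNNECESSARY when the CQE only reports L3/L4 OK, and report
 * CHECKSUM_NONE otherwise. Short frames and SCTP skip the complete path.
 */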
959static inline void mlx5e_handle_csum(struct net_device *netdev,
960 struct mlx5_cqe64 *cqe,
961 struct mlx5e_rq *rq,
5f6d12d1
MF
962 struct sk_buff *skb,
963 bool lro)
bbceefce 964{
05909bab 965 struct mlx5e_rq_stats *stats = rq->stats;
f938daee 966 int network_depth = 0;
f007c13d 967 __be16 proto;
f938daee 968
bbceefce
AS
969 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
970 goto csum_none;
971
5f6d12d1 972 if (lro) {
bbceefce 973 skb->ip_summed = CHECKSUM_UNNECESSARY;
05909bab 974 stats->csum_unnecessary++;
1b223dd3
SM
975 return;
976 }
977
5d0bb3ba
SM
978 /* True when explicitly set via priv flag, or XDP prog is loaded */
979 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
b856df28
OG
980 goto csum_unnecessary;
981
e8c8b53c
CW
982 /* CQE csum doesn't cover padding octets in short ethernet
983 * frames. And the pad field is appended prior to calculating
984 * and appending the FCS field.
985 *
986 * Detecting these padded frames requires to verify and parse
987 * IP headers, so we simply force all those small frames to be
988 * CHECKSUM_UNNECESSARY even if they are not padded.
989 */
990 if (short_frame(skb->len))
991 goto csum_unnecessary;
992
f007c13d 993 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
b2ac7541
RS
994 u8 ipproto = get_ip_proto(skb, network_depth, proto);
995
996 if (unlikely(ipproto == IPPROTO_SCTP))
fe1dc069
AH
997 goto csum_unnecessary;
998
b2ac7541
RS
999 if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1000 goto csum_none;
1001
db849faa 1002 stats->csum_complete++;
bbceefce 1003 skb->ip_summed = CHECKSUM_COMPLETE;
ecf842f6 1004 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
db849faa
SM
1005
1006 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1007 return; /* CQE csum covers all received bytes */
1008
1009 /* csum might need some fixups ...*/
8c7698d5 1010 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1b223dd3 1011 return;
bbceefce
AS
1012 }
1013
fe1dc069 1014csum_unnecessary:
1b223dd3 1015 if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
8c8811d4 1016 (cqe->hds_ip_ext & CQE_L4_OK))) {
1b223dd3
SM
1017 skb->ip_summed = CHECKSUM_UNNECESSARY;
1018 if (cqe_is_tunneled(cqe)) {
1019 skb->csum_level = 1;
1020 skb->encapsulation = 1;
05909bab 1021 stats->csum_unnecessary_inner++;
603e1f5b 1022 return;
1b223dd3 1023 }
05909bab 1024 stats->csum_unnecessary++;
1b223dd3
SM
1025 return;
1026 }
bbceefce
AS
1027csum_none:
1028 skb->ip_summed = CHECKSUM_NONE;
05909bab 1029 stats->csum_none++;
bbceefce
AS
1030}
1031
f007c13d
NS
1032#define MLX5E_CE_BIT_MASK 0x80
1033
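/* Translate CQE metadata into SKB fields: LRO/GSO hints, HW timestamp, RSS
 * hash, VLAN tag, checksum status and ECN CE marking, before handing the SKB
 * to the stack.
 */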
e586b3b0 1034static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
461017cb 1035 u32 cqe_bcnt,
e586b3b0
AV
1036 struct mlx5e_rq *rq,
1037 struct sk_buff *skb)
1038{
bd206fd5 1039 u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
05909bab 1040 struct mlx5e_rq_stats *stats = rq->stats;
e586b3b0 1041 struct net_device *netdev = rq->netdev;
e586b3b0 1042
f938daee 1043 skb->mac_len = ETH_HLEN;
00aebab2 1044
1182f365 1045 mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
00aebab2 1046
b2ac7541
RS
1047 if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1048 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
1049
e586b3b0 1050 if (lro_num_seg > 1) {
461017cb 1051 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
d9a40271 1052 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
8ab7e2ae
GP
1053 /* Subtract one since we already counted this as one
1054 * "regular" packet in mlx5e_complete_rx_cqe()
1055 */
05909bab
EBE
1056 stats->packets += lro_num_seg - 1;
1057 stats->lro_packets++;
1058 stats->lro_bytes += cqe_bcnt;
e586b3b0
AV
1059 }
1060
7c39afb3
FD
1061 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1062 skb_hwtstamps(skb)->hwtstamp =
1063 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
ef9814de 1064
e586b3b0
AV
1065 skb_record_rx_queue(skb, rq->ix);
1066
1067 if (likely(netdev->features & NETIF_F_RXHASH))
1068 mlx5e_skb_set_hash(cqe, skb);
1069
f24686e8 1070 if (cqe_has_vlan(cqe)) {
e586b3b0
AV
1071 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1072 be16_to_cpu(cqe->vlan_info));
05909bab 1073 stats->removed_vlan_packets++;
f24686e8 1074 }
12185a9f
AV
1075
1076 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
e20a0db3
SM
1077
1078 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
f007c13d
NS
1079 /* checking CE bit in cqe - MSB in ml_path field */
1080 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1081 mlx5e_enable_ecn(rq, skb);
1082
e20a0db3 1083 skb->protocol = eth_type_trans(skb, netdev);
47c97e6b
RD
1084
1085 if (unlikely(mlx5e_skb_is_multicast(skb)))
1086 stats->mcast_packets++;
e586b3b0
AV
1087}
1088
461017cb
TT
1089static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1090 struct mlx5_cqe64 *cqe,
1091 u32 cqe_bcnt,
1092 struct sk_buff *skb)
1093{
05909bab
EBE
1094 struct mlx5e_rq_stats *stats = rq->stats;
1095
1096 stats->packets++;
1097 stats->bytes += cqe_bcnt;
461017cb 1098 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
461017cb
TT
1099}
1100
619a8f2a
TT
1101static inline
1102struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1103 u32 frag_size, u16 headroom,
1104 u32 cqe_bcnt)
1105{
1106 struct sk_buff *skb = build_skb(va, frag_size);
1107
1108 if (unlikely(!skb)) {
05909bab 1109 rq->stats->buff_alloc_err++;
619a8f2a
TT
1110 return NULL;
1111 }
1112
1113 skb_reserve(skb, headroom);
1114 skb_put(skb, cqe_bcnt);
1115
1116 return skb;
1117}
1118
39d6443c
BT
1119static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
1120 u32 len, struct xdp_buff *xdp)
1121{
1122 xdp->data_hard_start = va;
39d6443c 1123 xdp->data = va + headroom;
56e2287b 1124 xdp_set_data_meta_invalid(xdp);
39d6443c
BT
1125 xdp->data_end = xdp->data + len;
1126 xdp->rxq = &rq->xdp_rxq;
1127 xdp->frame_sz = rq->buff.frame0_sz;
1128}
1129
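/* Linear (single-fragment) RX path: run the XDP program on the page first;
 * if the packet is not consumed, wrap the same page in an SKB with
 * build_skb() and take an extra page reference so it can be recycled.
 */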
5adf4c47 1130static struct sk_buff *
069d1146
TT
1131mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1132 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
2f48af12 1133{
069d1146 1134 struct mlx5e_dma_info *di = wi->di;
b45d8b50 1135 u16 rx_headroom = rq->buff.headroom;
39d6443c 1136 struct xdp_buff xdp;
1bfecfca 1137 struct sk_buff *skb;
b5503b99 1138 void *va, *data;
78aedd32 1139 u32 frag_size;
2f48af12 1140
accd5883 1141 va = page_address(di->page) + wi->offset;
d8bec2b2 1142 data = va + rx_headroom;
accd5883 1143 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2f48af12 1144
bd658dda
TT
1145 dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
1146 frag_size, DMA_FROM_DEVICE);
03993094 1147 prefetchw(va); /* xdp_frame data area */
b5503b99 1148 prefetch(data);
2f48af12 1149
39d6443c 1150 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
9c25a22d 1151 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
8515c581 1152 return NULL; /* page/packet was consumed by XDP */
86994156 1153
39d6443c 1154 rx_headroom = xdp.data - xdp.data_hard_start;
d628ee4f 1155 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
619a8f2a
TT
1156 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
1157 if (unlikely(!skb))
8515c581 1158 return NULL;
1bfecfca 1159
accd5883 1160 /* queue up for recycling/reuse */
1bfecfca 1161 page_ref_inc(di->page);
1bfecfca 1162
8515c581
OG
1163 return skb;
1164}
1165
5adf4c47 1166static struct sk_buff *
069d1146
TT
1167mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1168 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1169{
1170 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1171 struct mlx5e_wqe_frag_info *head_wi = wi;
1172 u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1173 u16 frag_headlen = headlen;
1174 u16 byte_cnt = cqe_bcnt - headlen;
1175 struct sk_buff *skb;
1176
069d1146
TT
1177 /* XDP is not supported in this configuration, as incoming packets
1178 * might spread among multiple pages.
1179 */
1180 skb = napi_alloc_skb(rq->cq.napi,
1181 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1182 if (unlikely(!skb)) {
1183 rq->stats->buff_alloc_err++;
1184 return NULL;
1185 }
1186
1187 prefetchw(skb->data);
1188
1189 while (byte_cnt) {
1190 u16 frag_consumed_bytes =
1191 min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
1192
1193 mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
1194 frag_consumed_bytes, frag_info->frag_stride);
1195 byte_cnt -= frag_consumed_bytes;
1196 frag_headlen = 0;
1197 frag_info++;
1198 wi++;
1199 }
1200
1201 /* copy header */
94816278 1202 mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
069d1146
TT
1203 /* skb linear part was allocated with headlen and aligned to long */
1204 skb->tail += headlen;
1205 skb->len += headlen;
1206
1207 return skb;
1208}
1209
8276ea13
AL
1210static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1211{
1212 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1213
1214 if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
b9961af7
AL
1215 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1216 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
8276ea13 1217 queue_work(rq->channel->priv->wq, &rq->recover_work);
b9961af7 1218 }
8276ea13
AL
1219}
1220
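/* Completion handler for the legacy (cyclic) RQ: look up the WQE fragments
 * from the CQE counter, build an SKB (unless XDP consumed the packet), pass
 * it to napi_gro_receive() and free or recycle the WQE.
 */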
5adf4c47 1221static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
8515c581 1222{
99cbfa93 1223 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
accd5883 1224 struct mlx5e_wqe_frag_info *wi;
8515c581 1225 struct sk_buff *skb;
8515c581 1226 u32 cqe_bcnt;
99cbfa93 1227 u16 ci;
8515c581 1228
99cbfa93
TT
1229 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1230 wi = get_frag(rq, ci);
1231 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
8515c581 1232
0a35ab3e 1233 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
8276ea13 1234 trigger_report(rq, cqe);
0a35ab3e
SM
1235 rq->stats->wqe_err++;
1236 goto free_wqe;
1237 }
1238
b3c04e83
PA
1239 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1240 mlx5e_skb_from_cqe_linear,
1241 mlx5e_skb_from_cqe_nonlinear,
1242 rq, cqe, wi, cqe_bcnt);
accd5883
TT
1243 if (!skb) {
1244 /* probably for XDP */
121e8927 1245 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
069d1146
TT
1246 /* do not return page to cache,
1247 * it will be returned on XDP_TX completion.
1248 */
99cbfa93 1249 goto wq_cyc_pop;
accd5883 1250 }
069d1146 1251 goto free_wqe;
accd5883 1252 }
8515c581 1253
461017cb 1254 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
8515c581 1255 napi_gro_receive(rq->cq.napi, skb);
2f48af12 1256
069d1146 1257free_wqe:
cb5189d1 1258 mlx5e_free_rx_wqe(rq, wi, true);
99cbfa93
TT
1259wq_cyc_pop:
1260 mlx5_wq_cyc_pop(wq);
2f48af12
TT
1261}
1262
e80541ec 1263#ifdef CONFIG_MLX5_ESWITCH
5adf4c47 1264static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
f5f82476
OG
1265{
1266 struct net_device *netdev = rq->netdev;
1267 struct mlx5e_priv *priv = netdev_priv(netdev);
1d447a39
SM
1268 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1269 struct mlx5_eswitch_rep *rep = rpriv->rep;
b8ce9037 1270 struct mlx5e_tc_update_priv tc_priv = {};
99cbfa93 1271 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
accd5883 1272 struct mlx5e_wqe_frag_info *wi;
f5f82476 1273 struct sk_buff *skb;
f5f82476 1274 u32 cqe_bcnt;
99cbfa93 1275 u16 ci;
f5f82476 1276
99cbfa93
TT
1277 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1278 wi = get_frag(rq, ci);
1279 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
f5f82476 1280
0a35ab3e
SM
1281 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1282 rq->stats->wqe_err++;
1283 goto free_wqe;
1284 }
1285
2901a5c6
TT
1286 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1287 mlx5e_skb_from_cqe_linear,
1288 mlx5e_skb_from_cqe_nonlinear,
1289 rq, cqe, wi, cqe_bcnt);
accd5883 1290 if (!skb) {
069d1146 1291 /* probably for XDP */
121e8927 1292 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
069d1146
TT
1293 /* do not return page to cache,
1294 * it will be returned on XDP_TX completion.
1295 */
99cbfa93 1296 goto wq_cyc_pop;
accd5883 1297 }
069d1146 1298 goto free_wqe;
accd5883 1299 }
f5f82476
OG
1300
1301 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1302
1303 if (rep->vlan && skb_vlan_tag_present(skb))
1304 skb_vlan_pop(skb);
1305
768c3667 1306 if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
d6d27782
PB
1307 goto free_wqe;
1308
f5f82476
OG
1309 napi_gro_receive(rq->cq.napi, skb);
1310
768c3667 1311 mlx5_rep_tc_post_napi_receive(&tc_priv);
b8ce9037 1312
069d1146 1313free_wqe:
cb5189d1 1314 mlx5e_free_rx_wqe(rq, wi, true);
99cbfa93
TT
1315wq_cyc_pop:
1316 mlx5_wq_cyc_pop(wq);
f5f82476 1317}
dfd9e750 1318
5adf4c47 1319static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
dfd9e750
PB
1320{
1321 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
1322 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
1323 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1324 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1325 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1326 u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
1327 u32 page_idx = wqe_offset >> PAGE_SHIFT;
b8ce9037 1328 struct mlx5e_tc_update_priv tc_priv = {};
dfd9e750
PB
1329 struct mlx5e_rx_wqe_ll *wqe;
1330 struct mlx5_wq_ll *wq;
1331 struct sk_buff *skb;
1332 u16 cqe_bcnt;
1333
1334 wi->consumed_strides += cstrides;
1335
1336 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1337 trigger_report(rq, cqe);
1338 rq->stats->wqe_err++;
1339 goto mpwrq_cqe_out;
1340 }
1341
1342 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1343 struct mlx5e_rq_stats *stats = rq->stats;
1344
1345 stats->mpwqe_filler_cqes++;
1346 stats->mpwqe_filler_strides += cstrides;
1347 goto mpwrq_cqe_out;
1348 }
1349
1350 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1351
1352 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1353 mlx5e_skb_from_cqe_mpwrq_linear,
1354 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1355 rq, wi, cqe_bcnt, head_offset, page_idx);
1356 if (!skb)
1357 goto mpwrq_cqe_out;
1358
1359 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1360
768c3667 1361 if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
d6d27782
PB
1362 goto mpwrq_cqe_out;
1363
dfd9e750
PB
1364 napi_gro_receive(rq->cq.napi, skb);
1365
768c3667 1366 mlx5_rep_tc_post_napi_receive(&tc_priv);
b8ce9037 1367
dfd9e750
PB
1368mpwrq_cqe_out:
1369 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1370 return;
1371
1372 wq = &rq->mpwqe.wq;
1373 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1374 mlx5e_free_rx_mpwqe(rq, wi, true);
1375 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1376}
5adf4c47
TT
1377
1378const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1379 .handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1380 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1381};
e80541ec 1382#endif
f5f82476 1383
5adf4c47 1384static struct sk_buff *
619a8f2a
TT
1385mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1386 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
bc77b240 1387{
75aa889f 1388 u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
9f9e9cd5 1389 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
bc77b240 1390 u32 frag_offset = head_offset + headlen;
9f9e9cd5
TT
1391 u32 byte_cnt = cqe_bcnt - headlen;
1392 struct mlx5e_dma_info *head_di = di;
619a8f2a
TT
1393 struct sk_buff *skb;
1394
1395 skb = napi_alloc_skb(rq->cq.napi,
75aa889f 1396 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
619a8f2a 1397 if (unlikely(!skb)) {
05909bab 1398 rq->stats->buff_alloc_err++;
619a8f2a
TT
1399 return NULL;
1400 }
1401
1402 prefetchw(skb->data);
bc77b240 1403
bc77b240 1404 if (unlikely(frag_offset >= PAGE_SIZE)) {
9f9e9cd5 1405 di++;
bc77b240
TT
1406 frag_offset -= PAGE_SIZE;
1407 }
bc77b240
TT
1408
1409 while (byte_cnt) {
1410 u32 pg_consumed_bytes =
1411 min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
fa698366
TT
1412 unsigned int truesize =
1413 ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
bc77b240 1414
fa698366
TT
1415 mlx5e_add_skb_frag(rq, skb, di, frag_offset,
1416 pg_consumed_bytes, truesize);
bc77b240
TT
1417 byte_cnt -= pg_consumed_bytes;
1418 frag_offset = 0;
9f9e9cd5 1419 di++;
bc77b240
TT
1420 }
1421 /* copy header */
94816278 1422 mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
bc77b240
TT
1423 /* skb linear part was allocated with headlen and aligned to long */
1424 skb->tail += headlen;
1425 skb->len += headlen;
619a8f2a
TT
1426
1427 return skb;
1428}
1429
5adf4c47 1430static struct sk_buff *
619a8f2a
TT
1431mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1432 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1433{
1434 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1435 u16 rx_headroom = rq->buff.headroom;
22f45398 1436 u32 cqe_bcnt32 = cqe_bcnt;
39d6443c 1437 struct xdp_buff xdp;
619a8f2a
TT
1438 struct sk_buff *skb;
1439 void *va, *data;
1440 u32 frag_size;
1441
0073c8f7
MS
1442 /* Check packet size. Note LRO doesn't use linear SKB */
1443 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1444 rq->stats->oversize_pkts_sw_drop++;
1445 return NULL;
1446 }
1447
619a8f2a
TT
1448 va = page_address(di->page) + head_offset;
1449 data = va + rx_headroom;
22f45398 1450 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
619a8f2a
TT
1451
1452 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1453 frag_size, DMA_FROM_DEVICE);
d3398a4f 1454 prefetchw(va); /* xdp_frame data area */
619a8f2a 1455 prefetch(data);
22f45398 1456
39d6443c 1457 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
9c25a22d 1458 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
22f45398
TT
1459 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1460 __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
1461 return NULL; /* page/packet was consumed by XDP */
1462 }
1463
39d6443c 1464 rx_headroom = xdp.data - xdp.data_hard_start;
d628ee4f 1465 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
22f45398 1466 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
619a8f2a
TT
1467 if (unlikely(!skb))
1468 return NULL;
1469
1470 /* queue up for recycling/reuse */
9f9e9cd5 1471 page_ref_inc(di->page);
619a8f2a
TT
1472
1473 return skb;
bc77b240
TT
1474}
1475
5adf4c47 1476static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
461017cb
TT
1477{
1478 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
461017cb 1479 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
21c59685 1480 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
619a8f2a
TT
1481 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1482 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1483 u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
1484 u32 page_idx = wqe_offset >> PAGE_SHIFT;
99cbfa93 1485 struct mlx5e_rx_wqe_ll *wqe;
422d4c40 1486 struct mlx5_wq_ll *wq;
461017cb 1487 struct sk_buff *skb;
461017cb 1488 u16 cqe_bcnt;
461017cb
TT
1489
1490 wi->consumed_strides += cstrides;
1491
0a35ab3e 1492 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
8276ea13 1493 trigger_report(rq, cqe);
05909bab 1494 rq->stats->wqe_err++;
461017cb
TT
1495 goto mpwrq_cqe_out;
1496 }
1497
1498 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
b71ba6b4
TT
1499 struct mlx5e_rq_stats *stats = rq->stats;
1500
1501 stats->mpwqe_filler_cqes++;
1502 stats->mpwqe_filler_strides += cstrides;
461017cb
TT
1503 goto mpwrq_cqe_out;
1504 }
1505
461017cb 1506 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
461017cb 1507
b3c04e83
PA
1508 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1509 mlx5e_skb_from_cqe_mpwrq_linear,
1510 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1511 rq, wi, cqe_bcnt, head_offset, page_idx);
22f45398 1512 if (!skb)
619a8f2a
TT
1513 goto mpwrq_cqe_out;
1514
461017cb 1515 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
8515c581 1516 napi_gro_receive(rq->cq.napi, skb);
461017cb
TT
1517
1518mpwrq_cqe_out:
b45d8b50 1519 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
461017cb
TT
1520 return;
1521
422d4c40
TT
1522 wq = &rq->mpwqe.wq;
1523 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
cb5189d1 1524 mlx5e_free_rx_mpwqe(rq, wi, true);
422d4c40 1525 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
461017cb
TT
1526}
1527
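/* NAPI RX poll: drain up to @budget CQEs, resuming any CQE-compression block
 * left over from the previous poll, dispatching each CQE through
 * rq->handle_rx_cqe, and finally updating the CQ doorbell record.
 */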
44fb6fbb 1528int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
e586b3b0 1529{
e3391054 1530 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
79d356ef 1531 struct mlx5_cqwq *cqwq = &cq->wq;
4b7dfc99 1532 struct mlx5_cqe64 *cqe;
7219ab34 1533 int work_done = 0;
e586b3b0 1534
0e5c04f6 1535 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
6cd392a0
DJ
1536 return 0;
1537
6849c6d8
SM
1538 if (rq->page_pool)
1539 page_pool_nid_changed(rq->page_pool, numa_mem_id());
1540
9df86bdb 1541 if (rq->cqd.left) {
79d356ef 1542 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
9df86bdb
MM
1543 if (rq->cqd.left || work_done >= budget)
1544 goto out;
1545 }
7219ab34 1546
79d356ef 1547 cqe = mlx5_cqwq_get_cqe(cqwq);
bfc69825
TT
1548 if (!cqe) {
1549 if (unlikely(work_done))
1550 goto out;
4b7dfc99 1551 return 0;
bfc69825 1552 }
e586b3b0 1553
4b7dfc99 1554 do {
7219ab34
TT
1555 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1556 work_done +=
79d356ef 1557 mlx5e_decompress_cqes_start(rq, cqwq,
7219ab34
TT
1558 budget - work_done);
1559 continue;
1560 }
1561
79d356ef 1562 mlx5_cqwq_pop(cqwq);
a1f5a1a8 1563
55f96872
PA
1564 INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
1565 mlx5e_handle_rx_cqe, rq, cqe);
79d356ef 1566 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
e586b3b0 1567
bfc69825 1568out:
4fb2f516
TT
1569 if (rq->xdp_prog)
1570 mlx5e_xdp_rx_poll_complete(rq);
5168d732 1571
79d356ef 1572 mlx5_cqwq_update_db_record(cqwq);
e586b3b0
AV
1573
1574 /* ensure cq space is freed before enabling more cqes */
1575 wmb();
1576
44fb6fbb 1577 return work_done;
e586b3b0 1578}
1c4bf940 1579
9d6bd752
SM
1580#ifdef CONFIG_MLX5_CORE_IPOIB
1581
8b46d424 1582#define MLX5_IB_GRH_SGID_OFFSET 8
9d6bd752 1583#define MLX5_IB_GRH_DGID_OFFSET 24
9d6bd752
SM
1584#define MLX5_GID_SIZE 16
1585
1586static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1587 struct mlx5_cqe64 *cqe,
1588 u32 cqe_bcnt,
1589 struct sk_buff *skb)
1590{
36e564b7 1591 struct hwtstamp_config *tstamp;
19052a3b 1592 struct mlx5e_rq_stats *stats;
7e7f4780 1593 struct net_device *netdev;
36e564b7 1594 struct mlx5e_priv *priv;
b57fe691 1595 char *pseudo_header;
8b46d424 1596 u32 flags_rqpn;
7e7f4780 1597 u32 qpn;
9d6bd752
SM
1598 u8 *dgid;
1599 u8 g;
1600
7e7f4780
AV
1601 qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1602 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1603
1604 /* No mapping present, cannot process SKB. This might happen if a child
1605 * interface is going down while having unprocessed CQEs on parent RQ
1606 */
1607 if (unlikely(!netdev)) {
1608 /* TODO: add drop counters support */
1609 skb->dev = NULL;
1610 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1611 return;
1612 }
1613
36e564b7
FD
1614 priv = mlx5i_epriv(netdev);
1615 tstamp = &priv->tstamp;
19052a3b 1616 stats = &priv->channel_stats[rq->ix].rq;
36e564b7 1617
8b46d424
ES
1618 flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
1619 g = (flags_rqpn >> 28) & 3;
9d6bd752
SM
1620 dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1621 if ((!g) || dgid[0] != 0xff)
1622 skb->pkt_type = PACKET_HOST;
1623 else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1624 skb->pkt_type = PACKET_BROADCAST;
1625 else
1626 skb->pkt_type = PACKET_MULTICAST;
1627
8b46d424
ES
1628 /* Drop packets that this interface sent, ie multicast packets
1629 * that the HCA has replicated.
9d6bd752 1630 */
8b46d424
ES
1631 if (g && (qpn == (flags_rqpn & 0xffffff)) &&
1632 (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
1633 MLX5_GID_SIZE) == 0)) {
1634 skb->dev = NULL;
1635 return;
1636 }
9d6bd752
SM
1637
1638 skb_pull(skb, MLX5_IB_GRH_BYTES);
1639
1640 skb->protocol = *((__be16 *)(skb->data));
1641
3d6f3cdf
FD
1642 if (netdev->features & NETIF_F_RXCSUM) {
1643 skb->ip_summed = CHECKSUM_COMPLETE;
1644 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1645 stats->csum_complete++;
1646 } else {
1647 skb->ip_summed = CHECKSUM_NONE;
1648 stats->csum_none++;
1649 }
9d6bd752 1650
36e564b7 1651 if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
7c39afb3
FD
1652 skb_hwtstamps(skb)->hwtstamp =
1653 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
3844b07e 1654
9d6bd752
SM
1655 skb_record_rx_queue(skb, rq->ix);
1656
1657 if (likely(netdev->features & NETIF_F_RXHASH))
1658 mlx5e_skb_set_hash(cqe, skb);
1659
b57fe691
ES
1660 /* 20 bytes of ipoib header and 4 for encap existing */
1661 pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1662 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
9d6bd752 1663 skb_reset_mac_header(skb);
b57fe691 1664 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
9d6bd752
SM
1665
1666 skb->dev = netdev;
1667
05909bab
EBE
1668 stats->packets++;
1669 stats->bytes += cqe_bcnt;
9d6bd752
SM
1670}
1671
5adf4c47 1672static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
9d6bd752 1673{
99cbfa93 1674 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
accd5883 1675 struct mlx5e_wqe_frag_info *wi;
9d6bd752 1676 struct sk_buff *skb;
9d6bd752 1677 u32 cqe_bcnt;
99cbfa93 1678 u16 ci;
9d6bd752 1679
99cbfa93
TT
1680 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1681 wi = get_frag(rq, ci);
1682 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
9d6bd752 1683
0a35ab3e
SM
1684 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1685 rq->stats->wqe_err++;
1686 goto wq_free_wqe;
1687 }
1688
b3c04e83
PA
1689 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1690 mlx5e_skb_from_cqe_linear,
1691 mlx5e_skb_from_cqe_nonlinear,
1692 rq, cqe, wi, cqe_bcnt);
9d6bd752 1693 if (!skb)
accd5883 1694 goto wq_free_wqe;
9d6bd752
SM
1695
1696 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
7e7f4780
AV
1697 if (unlikely(!skb->dev)) {
1698 dev_kfree_skb_any(skb);
1699 goto wq_free_wqe;
1700 }
9d6bd752
SM
1701 napi_gro_receive(rq->cq.napi, skb);
1702
accd5883 1703wq_free_wqe:
cb5189d1 1704 mlx5e_free_rx_wqe(rq, wi, true);
99cbfa93 1705 mlx5_wq_cyc_pop(wq);
9d6bd752
SM
1706}
1707
5adf4c47
TT
1708const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
1709 .handle_rx_cqe = mlx5i_handle_rx_cqe,
1710 .handle_rx_cqe_mpwqe = NULL, /* Not supported */
1711};
9d6bd752 1712#endif /* CONFIG_MLX5_CORE_IPOIB */
899a59d3
IT
1713
1714#ifdef CONFIG_MLX5_EN_IPSEC
1715
5adf4c47 1716static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
899a59d3 1717{
99cbfa93 1718 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
899a59d3 1719 struct mlx5e_wqe_frag_info *wi;
899a59d3 1720 struct sk_buff *skb;
899a59d3 1721 u32 cqe_bcnt;
99cbfa93 1722 u16 ci;
899a59d3 1723
99cbfa93
TT
1724 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1725 wi = get_frag(rq, ci);
1726 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
899a59d3 1727
0a35ab3e
SM
1728 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1729 rq->stats->wqe_err++;
1730 goto wq_free_wqe;
1731 }
1732
b3c04e83
PA
1733 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1734 mlx5e_skb_from_cqe_linear,
1735 mlx5e_skb_from_cqe_nonlinear,
1736 rq, cqe, wi, cqe_bcnt);
0a35ab3e
SM
1737 if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
1738 goto wq_free_wqe;
1739
b3ccf978 1740 skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
0a35ab3e
SM
1741 if (unlikely(!skb))
1742 goto wq_free_wqe;
899a59d3
IT
1743
1744 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1745 napi_gro_receive(rq->cq.napi, skb);
1746
0a35ab3e 1747wq_free_wqe:
cb5189d1 1748 mlx5e_free_rx_wqe(rq, wi, true);
99cbfa93 1749 mlx5_wq_cyc_pop(wq);
899a59d3
IT
1750}
1751
1752#endif /* CONFIG_MLX5_EN_IPSEC */
5adf4c47
TT
1753
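/* Select the RX callbacks for an RQ at init time, based on the WQ type
 * (striding vs. cyclic), whether the queue is AF_XDP (xsk), whether linear
 * SKBs can be used, and whether IPsec offload is active.
 */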
1754int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
1755{
1756 struct mlx5_core_dev *mdev = rq->mdev;
1757 struct mlx5e_channel *c = rq->channel;
1758
1759 switch (rq->wq_type) {
1760 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1761 rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
1762 mlx5e_xsk_skb_from_cqe_mpwrq_linear :
1763 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
1764 mlx5e_skb_from_cqe_mpwrq_linear :
1765 mlx5e_skb_from_cqe_mpwrq_nonlinear;
1766 rq->post_wqes = mlx5e_post_rx_mpwqes;
1767 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
1768
1769 rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
1770#ifdef CONFIG_MLX5_EN_IPSEC
1771 if (MLX5_IPSEC_DEV(mdev)) {
1772 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
1773 return -EINVAL;
1774 }
1775#endif
1776 if (!rq->handle_rx_cqe) {
1777 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
1778 return -EINVAL;
1779 }
1780 break;
1781 default: /* MLX5_WQ_TYPE_CYCLIC */
1782 rq->wqe.skb_from_cqe = xsk ?
1783 mlx5e_xsk_skb_from_cqe_linear :
1784 mlx5e_rx_is_linear_skb(params, NULL) ?
1785 mlx5e_skb_from_cqe_linear :
1786 mlx5e_skb_from_cqe_nonlinear;
1787 rq->post_wqes = mlx5e_post_rx_wqes;
1788 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1789
1790#ifdef CONFIG_MLX5_EN_IPSEC
1791 if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
1792 c->priv->ipsec)
1793 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
1794 else
1795#endif
1796 rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
1797 if (!rq->handle_rx_cqe) {
1798 netdev_err(c->netdev, "RX handler of RQ is not set\n");
1799 return -EINVAL;
1800 }
1801 }
1802
1803 return 0;
1804}