/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
#include <linux/skbuff_ref.h>

#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"
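/* RX buffers are backed by whole pages: mlx4_alloc_page() below grabs one
 * page, DMA-maps it with priv->dma_dir (DMA_FROM_DEVICE in the regular
 * datapath, DMA_BIDIRECTIONAL when an XDP program may rewrite and
 * retransmit the frame - see mlx4_en_calc_rx_buf()), and records the
 * initial payload offset (priv->rx_headroom) in frag->page_offset.
 */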
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_alloc *frag,
                           gfp_t gfp)
{
        struct page *page;
        dma_addr_t dma;

        page = alloc_page(gfp);
        if (unlikely(!page))
                return -ENOMEM;
        dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
        if (unlikely(dma_mapping_error(priv->ddev, dma))) {
                __free_page(page);
                return -ENOMEM;
        }
        frag->page = page;
        frag->dma = dma;
        frag->page_offset = priv->rx_headroom;
        return 0;
}
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
                               struct mlx4_en_rx_ring *ring,
                               struct mlx4_en_rx_desc *rx_desc,
                               struct mlx4_en_rx_alloc *frags,
                               gfp_t gfp)
{
        int i;

        for (i = 0; i < priv->num_frags; i++, frags++) {
                if (!frags->page) {
                        if (mlx4_alloc_page(priv, frags, gfp))
                                return -ENOMEM;
                        ring->rx_alloc_pages++;
                }
                rx_desc->data[i].addr = cpu_to_be64(frags->dma +
                                                    frags->page_offset);
        }
        return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
                              struct mlx4_en_rx_alloc *frag)
{
        if (frag->page) {
                dma_unmap_page(priv->ddev, frag->dma,
                               PAGE_SIZE, priv->dma_dir);
                __free_page(frag->page);
        }
        /* We need to clear all fields, otherwise a change of priv->log_rx_info
         * could lead to seeing garbage later in frag->page.
         */
        memset(frag, 0, sizeof(*frag));
}
static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring, int index)
{
        struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
        int possible_frags;
        int i;

        /* Set size and memtype fields */
        for (i = 0; i < priv->num_frags; i++) {
                rx_desc->data[i].byte_count =
                        cpu_to_be32(priv->frag_info[i].frag_size);
                rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
        }

        /* If the number of used fragments does not fill up the ring stride,
         * remaining (unused) fragments must be padded with null address/size
         * and a special memory key.
         */
        possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
        for (i = priv->num_frags; i < possible_frags; i++) {
                rx_desc->data[i].byte_count = 0;
                rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
                rx_desc->data[i].addr = 0;
        }
}
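/* Fast-path refill: when the ring runs in XDP (page-per-packet) mode,
 * mlx4_en_prepare_rx_desc() first tries to reuse a page from
 * ring->page_cache (filled by mlx4_en_rx_recycle() further below) before
 * falling back to fresh page allocation via mlx4_en_alloc_frags().
 */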
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
                                   struct mlx4_en_rx_ring *ring, int index,
                                   gfp_t gfp)
{
        struct mlx4_en_rx_desc *rx_desc = ring->buf +
                (index << ring->log_stride);
        struct mlx4_en_rx_alloc *frags = ring->rx_info +
                (index << priv->log_rx_info);

        if (likely(ring->page_cache.index > 0)) {
                /* XDP uses a single page per frame */
                if (!frags->page) {
                        ring->page_cache.index--;
                        frags->page = ring->page_cache.buf[ring->page_cache.index].page;
                        frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
                }
                frags->page_offset = XDP_PACKET_HEADROOM;
                rx_desc->data[0].addr = cpu_to_be64(frags->dma +
                                                    XDP_PACKET_HEADROOM);
                return 0;
        }

        return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
        return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
        *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring,
                                 int index)
{
        struct mlx4_en_rx_alloc *frags;
        int nr;

        frags = ring->rx_info + (index << priv->log_rx_info);
        for (nr = 0; nr < priv->num_frags; nr++) {
                en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
                mlx4_en_free_frag(priv, frags + nr);
        }
}
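/* Initial ring fill at activation time. If an allocation fails before
 * MLX4_EN_MIN_RX_SIZE descriptors have been posted the fill is treated as a
 * hard error; otherwise every ring is shrunk to the largest power of two
 * that was successfully filled (see rounddown_pow_of_two() below).
 */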
/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
        struct mlx4_en_rx_ring *ring;
        int ring_ind;
        int buf_ind;
        int new_size;

        for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
                for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                        ring = priv->rx_ring[ring_ind];

                        if (mlx4_en_prepare_rx_desc(priv, ring,
                                                    ring->actual_size,
                                                    GFP_KERNEL)) {
                                if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
                                        en_err(priv, "Failed to allocate enough rx buffers\n");
                                        return -ENOMEM;
                                }
                                new_size = rounddown_pow_of_two(ring->actual_size);
                                en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
                                        ring->actual_size, new_size);
                                goto reduce_rings;
                        }
                        ring->actual_size++;
                        ring->prod++;
                }
        }
        return 0;

reduce_rings:
        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = priv->rx_ring[ring_ind];
                while (ring->actual_size > new_size) {
                        ring->actual_size--;
                        ring->prod--;
                        mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
                }
        }

        return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
{
        int index;

        en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
               ring->cons, ring->prod);

        /* Unmap and free Rx buffers */
        for (index = 0; index < ring->size; index++) {
                en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
                mlx4_en_free_rx_desc(priv, ring, index);
        }
        ring->cons = 0;
        ring->prod = 0;
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
        int i;
        int num_of_eqs;
        int num_rx_rings;
        struct mlx4_dev *dev = mdev->dev;

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
                num_of_eqs = max_t(int, MIN_RX_RINGS,
                                   min_t(int,
                                         mlx4_get_eqs_per_port(mdev->dev, i),
                                         DEF_RX_RINGS));

                num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
                        min_t(int, num_of_eqs, num_online_cpus());
                mdev->profile.prof[i].rx_ring_num =
                        rounddown_pow_of_two(num_rx_rings);
        }
}
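/* Illustration only (simple arithmetic, ignoring the low-memory profile):
 * with 6 EQs available on a port and 8 online CPUs, num_rx_rings =
 * min(6, 8) = 6, which rounddown_pow_of_two() reduces to 4 RX rings for
 * that port. The exact bounds also depend on MIN_RX_RINGS above.
 */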
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
                           u32 size, u16 stride, int node, int queue_index)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring;
        int err = -ENOMEM;
        int tmp;

        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
        if (!ring) {
                en_err(priv, "Failed to allocate RX ring structure\n");
                return -ENOMEM;
        }

        ring->prod = 0;
        ring->cons = 0;
        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

        if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
                goto err_ring;

        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                        sizeof(struct mlx4_en_rx_alloc));
        ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
        if (!ring->rx_info) {
                err = -ENOMEM;
                goto err_xdp_info;
        }

        en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
               ring->rx_info, tmp);

        /* Allocate HW buffers on provided NUMA node */
        set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_info;

        ring->buf = ring->wqres.buf.direct.buf;

        ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

        *pring = ring;
        return 0;

err_info:
        kvfree(ring->rx_info);
        ring->rx_info = NULL;
err_xdp_info:
        xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
        kfree(ring);
        *pring = NULL;
        return err;
}
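/* Ring activation derives the descriptor stride from the fragment count:
 * stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 * DS_SIZE * num_frags). As a rough illustration only (assuming a 16-byte
 * scatter entry), three fragments need at most 3 * 16 = 48 bytes of
 * scatter entries, so the stride rounds up to 64 bytes.
 */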
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
        struct mlx4_en_rx_ring *ring;
        int i;
        int ring_ind;
        int err;
        int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                        DS_SIZE * priv->num_frags);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = priv->rx_ring[ring_ind];

                ring->prod = 0;
                ring->cons = 0;
                ring->actual_size = 0;
                ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

                ring->stride = stride;
                if (ring->stride <= TXBB_SIZE) {
                        /* Stamp first unused send wqe */
                        __be32 *ptr = (__be32 *)ring->buf;
                        __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
                        *ptr = stamp;
                        /* Move pointer to start of rx section */
                        ring->buf += TXBB_SIZE;
                }

                ring->log_stride = ffs(ring->stride) - 1;
                ring->buf_size = ring->size * ring->stride;

                memset(ring->buf, 0, ring->buf_size);
                mlx4_en_update_rx_prod_db(ring);

                /* Initialize all descriptors */
                for (i = 0; i < ring->size; i++)
                        mlx4_en_init_rx_desc(priv, ring, i);
        }
        err = mlx4_en_fill_rx_buffers(priv);
        if (err)
                goto err_buffers;

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = priv->rx_ring[ring_ind];

                ring->size_mask = ring->actual_size - 1;
                mlx4_en_update_rx_prod_db(ring);
        }

        return 0;

err_buffers:
        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
                mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

        ring_ind = priv->rx_ring_num - 1;
        while (ring_ind >= 0) {
                if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
                        priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
                ring_ind--;
        }
        return err;
}
/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_rx_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
        int ring;

        if (!priv->port_up)
                return;

        for (ring = 0; ring < priv->rx_ring_num; ring++) {
                if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
                        local_bh_disable();
                        napi_schedule(&priv->rx_cq[ring]->napi);
                        local_bh_enable();
                }
        }
}
/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
                        struct mlx4_en_rx_alloc *frame)
{
        struct mlx4_en_page_cache *cache = &ring->page_cache;

        if (cache->index >= MLX4_EN_CACHE_SIZE)
                return false;

        cache->buf[cache->index].page = frame->page;
        cache->buf[cache->index].dma = frame->dma;
        cache->index++;
        return true;
}
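/* The recycle cache is bounded by MLX4_EN_CACHE_SIZE (defined in mlx4_en.h,
 * typically a small multiple of the NAPI poll weight, per the comment
 * above); once it is full, the frame falls back to the normal unmap/free
 * path in the caller.
 */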
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring **pring,
                             u32 size, u16 stride)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring = *pring;
        struct bpf_prog *old_prog;

        old_prog = rcu_dereference_protected(
                                        ring->xdp_prog,
                                        lockdep_is_held(&mdev->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
        xdp_rxq_info_unreg(&ring->xdp_rxq);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        kvfree(ring->rx_info);
        ring->rx_info = NULL;
        kfree(ring);
        *pring = NULL;
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
{
        int i;

        for (i = 0; i < ring->page_cache.index; i++) {
                dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
                               PAGE_SIZE, priv->dma_dir);
                put_page(ring->page_cache.buf[i].page);
        }
        ring->page_cache.index = 0;
        mlx4_en_free_rx_buf(priv, ring);
        if (ring->stride <= TXBB_SIZE)
                ring->buf -= TXBB_SIZE;
}
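/* mlx4_en_complete_rx_desc() attaches the received fragments to the skb and
 * decides per page whether it can be kept for the next descriptor: for
 * half-page strides the offset is flipped between the two halves and the
 * page is released only if it is still referenced elsewhere, is a
 * pfmemalloc page, or sits on a remote NUMA node; otherwise the offset
 * advances by the aligned fragment size until the page is exhausted.
 */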
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                                    struct mlx4_en_rx_alloc *frags,
                                    struct sk_buff *skb,
                                    unsigned int length)
{
        const struct mlx4_en_frag_info *frag_info = priv->frag_info;
        unsigned int truesize = 0;
        bool release = true;
        int nr, frag_size;
        struct page *page;
        dma_addr_t dma;

        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0;; frags++) {
                frag_size = min_t(int, length, frag_info->frag_size);

                page = frags->page;
                if (unlikely(!page))
                        goto fail;

                dma = frags->dma;
                dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
                                              frag_size, priv->dma_dir);

                __skb_fill_page_desc(skb, nr, page, frags->page_offset,
                                     frag_size);

                truesize += frag_info->frag_stride;
                if (frag_info->frag_stride == PAGE_SIZE / 2) {
                        frags->page_offset ^= PAGE_SIZE / 2;
                        release = page_count(page) != 1 ||
                                  page_is_pfmemalloc(page) ||
                                  page_to_nid(page) != numa_mem_id();
                } else if (!priv->rx_headroom) {
                        /* rx_headroom for non XDP setup is always 0.
                         * When XDP is set, the above condition will
                         * guarantee page is always released.
                         */
                        u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

                        frags->page_offset += sz_align;
                        release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
                }
                if (release) {
                        dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
                        frags->page = NULL;
                } else {
                        page_ref_inc(page);
                }

                nr++;
                length -= frag_size;
                if (!length)
                        break;
                frag_info++;
        }
        skb->truesize += truesize;

        return nr;

fail:
        while (nr > 0) {
                nr--;
                __skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
        }
        return 0;
}
static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
        const unsigned char *data = va + ETH_HLEN;
        int i;

        for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
                if (data[i] != (unsigned char)i)
                        return;
        }
        /* Loopback found */
        priv->loopback_ok = 1;
}
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
                                      struct mlx4_en_rx_ring *ring)
{
        u32 missing = ring->actual_size - (ring->prod - ring->cons);

        /* Try to batch allocations, but not too much. */
        if (missing < 8)
                return;
        do {
                if (mlx4_en_prepare_rx_desc(priv, ring,
                                            ring->prod & ring->size_mask,
                                            GFP_ATOMIC | __GFP_MEMALLOC))
                        break;
                ring->prod++;
        } while (likely(--missing));

        mlx4_en_update_rx_prod_db(ring);
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation.
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
                                         struct vlan_hdr *vlanh)
{
        return csum_add(hw_checksum, *(__wsum *)vlanh);
}
/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct iphdr *iph)
{
        __u16 length_for_csum = 0;
        __wsum csum_pseudo_header = 0;
        __u8 ipproto = iph->protocol;

        if (unlikely(ipproto == IPPROTO_SCTP))
                return -1;

        length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
        csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                                length_for_csum, ipproto, 0);
        skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
        return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes from the IPv6 header:
 * the first 4 bytes (priority, version, flow_lbl)
 * and 2 additional bytes (nexthdr, hop_limit).
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
                               struct ipv6hdr *ipv6h)
{
        __u8 nexthdr = ipv6h->nexthdr;
        __wsum temp;

        if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
                     nexthdr == IPPROTO_HOPOPTS ||
                     nexthdr == IPPROTO_SCTP))
                return -1;

        /* priority, version, flow_lbl */
        temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
        /* nexthdr and hop_limit */
        skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
        return 0;
}
#endif

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
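/* short_frame(): ETH_ZLEN is the 60-byte minimum Ethernet frame (without
 * FCS) and ETH_FCS_LEN is 4, so frames of at most 64 bytes on the wire are
 * treated as potentially padded and skip CHECKSUM_COMPLETE (see below).
 */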
/* We reach this function only after checking that at least one of
 * the (IPv4 | IPv6) bits is set in cqe->status.
 */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
                      netdev_features_t dev_features)
{
        __wsum hw_checksum = 0;
        void *hdr;

        /* CQE csum doesn't cover padding octets in short ethernet
         * frames. And the pad field is appended prior to calculating
         * and appending the FCS field.
         *
         * Detecting these padded frames requires verifying and parsing
         * IP headers, so we simply force all those small frames to skip
         * checksum complete.
         */
        if (short_frame(skb->len))
                return -EINVAL;

        hdr = (u8 *)va + sizeof(struct ethhdr);
        hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

        if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
            !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
                hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
                hdr += sizeof(struct vlan_hdr);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
                return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
        return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
#if IS_ENABLED(CONFIG_IPV6)
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
#else
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
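/* Per-packet context handed to XDP programs by this driver: the xdp_buff
 * itself plus the CQE, device and ring pointers needed by the XDP RX
 * metadata kfuncs below (mlx4_en_xdp_rx_timestamp() and
 * mlx4_en_xdp_rx_hash()).
 */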
struct mlx4_en_xdp_buff {
        struct xdp_buff xdp;
        struct mlx4_cqe *cqe;
        struct mlx4_en_dev *mdev;
        struct mlx4_en_rx_ring *ring;
        struct net_device *dev;
};
int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
        struct mlx4_en_xdp_buff *_ctx = (void *)ctx;

        if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
                return -ENODATA;

        *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
                                          mlx4_en_get_cqe_ts(_ctx->cqe));
        return 0;
}
int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
                        enum xdp_rss_hash_type *rss_type)
{
        struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
        struct mlx4_cqe *cqe = _ctx->cqe;
        enum xdp_rss_hash_type xht = 0;
        __be16 status;

        if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
                return -ENODATA;

        *hash = be32_to_cpu(cqe->immed_rss_invalid);
        status = cqe->status;
        if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
                xht = XDP_RSS_L4_TCP;
        if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
                xht = XDP_RSS_L4_UDP;
        if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
                xht |= XDP_RSS_L3_IPV4;
        if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
                xht |= XDP_RSS_L3_IPV6;
                if (cqe->ipv6_ext_mask)
                        xht |= XDP_RSS_L3_DYNHDR;
        }
        *rss_type = xht;

        return 0;
}
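/* Main RX NAPI handler. For each completed CQE it locates the posted
 * fragments, optionally runs the XDP program (pass/tx/redirect/drop), and
 * otherwise builds an skb via napi_get_frags()/napi_gro_frags(), filling in
 * timestamp, checksum, RSS hash and VLAN tag from the CQE. Returns the
 * number of packets processed, up to the NAPI budget.
 */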
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_xdp_buff mxbuf = {};
        int factor = priv->cqe_factor;
        struct mlx4_en_rx_ring *ring;
        struct bpf_prog *xdp_prog;
        int cq_ring = cq->ring;
        bool doorbell_pending;
        bool xdp_redir_flush;
        struct mlx4_cqe *cqe;
        int polled = 0;
        int index;

        if (unlikely(!priv->port_up || budget <= 0))
                return 0;

        ring = priv->rx_ring[cq_ring];

        xdp_prog = rcu_dereference_bh(ring->xdp_prog);
        xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
        doorbell_pending = false;
        xdp_redir_flush = false;

        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
         * descriptor offset can be deduced from the CQE index instead of
         * reading 'cqe->index' */
        index = cq->mcq.cons_index & ring->size_mask;
        cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                    cq->mcq.cons_index & cq->size)) {
                struct mlx4_en_rx_alloc *frags;
                enum pkt_hash_types hash_type;
                struct sk_buff *skb;
                unsigned int length;
                int ip_summed;
                void *va;
                int nr;

                frags = ring->rx_info + (index << priv->log_rx_info);
                va = page_address(frags[0].page) + frags[0].page_offset;

                /*
                 * make sure we read the CQE after we read the ownership bit
                 */
                dma_rmb();

                /* Drop packet on bad receive or bad checksum */
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
                        en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
                               ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
                               ((struct mlx4_err_cqe *)cqe)->syndrome);
                        goto next;
                }
                if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
                        en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
                        goto next;
                }

                /* Check if we need to drop the packet if SRIOV is not enabled
                 * and not performing the selftest or flb disabled
                 */
                if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
                        const struct ethhdr *ethh = va;
                        dma_addr_t dma;

                        /* Get a pointer to the first fragment, since we don't
                         * have an skb yet, and cast it to an ethhdr struct.
                         */
                        dma = frags[0].dma + frags[0].page_offset;
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);

                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
                                struct hlist_head *bucket;
                                unsigned int mac_hash;

                                /* Drop the packet, since HW loopback-ed it */
                                mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                hlist_for_each_entry_rcu_bh(entry, bucket, hlist) {
                                        if (ether_addr_equal_64bits(entry->mac,
                                                                    ethh->h_source))
                                                goto next;
                                }
                        }
                }

                if (unlikely(priv->validate_loopback)) {
                        validate_loopback(priv, va);
                        goto next;
                }

                /*
                 * Packet is OK - process it.
                 */
                length = be32_to_cpu(cqe->byte_cnt);
                length -= ring->fcs_del;

                /* A bpf program gets first chance to drop the packet. It may
                 * read bytes but not past the end of the frag.
                 */
                if (xdp_prog) {
                        dma_addr_t dma;
                        void *orig_data;
                        u32 act;

                        dma = frags[0].dma + frags[0].page_offset;
                        dma_sync_single_for_cpu(priv->ddev, dma,
                                                priv->frag_info[0].frag_size,
                                                DMA_FROM_DEVICE);

                        xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
                                         frags[0].page_offset, length, true);
                        orig_data = mxbuf.xdp.data;
                        mxbuf.cqe = cqe;
                        mxbuf.mdev = priv->mdev;
                        mxbuf.ring = ring;
                        mxbuf.dev = dev;

                        act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);

                        length = mxbuf.xdp.data_end - mxbuf.xdp.data;
                        if (mxbuf.xdp.data != orig_data) {
                                frags[0].page_offset = mxbuf.xdp.data -
                                        mxbuf.xdp.data_hard_start;
                                va = mxbuf.xdp.data;
                        }

                        switch (act) {
                        case XDP_PASS:
                                break;
                        case XDP_REDIRECT:
                                if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
                                        ring->xdp_redirect++;
                                        xdp_redir_flush = true;
                                        frags[0].page = NULL;
                                        goto next;
                                }
                                ring->xdp_redirect_fail++;
                                trace_xdp_exception(dev, xdp_prog, act);
                                goto xdp_drop_no_cnt;
                        case XDP_TX:
                                if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
                                                               length, cq_ring,
                                                               &doorbell_pending))) {
                                        frags[0].page = NULL;
                                        goto next;
                                }
                                trace_xdp_exception(dev, xdp_prog, act);
                                goto xdp_drop_no_cnt; /* Drop on xmit failure */
                        default:
                                bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
                                fallthrough;
                        case XDP_ABORTED:
                                trace_xdp_exception(dev, xdp_prog, act);
                                fallthrough;
                        case XDP_DROP:
                                ring->xdp_drop++;
xdp_drop_no_cnt:
                                goto next;
                        }
                }
                ring->bytes += length;
                ring->packets++;

                skb = napi_get_frags(&cq->napi);
                if (unlikely(!skb))
                        goto next;

                if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
                        u64 timestamp = mlx4_en_get_cqe_ts(cqe);

                        mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
                                               timestamp);
                }
                skb_record_rx_queue(skb, cq_ring);

                if (likely(dev->features & NETIF_F_RXCSUM)) {
                        /* TODO: For IP non TCP/UDP packets when csum complete is
                         * not an option (not supported or any other reason) we can
                         * actually check cqe IPOK status bit and report
                         * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
                         */
                        if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                                       MLX4_CQE_STATUS_UDP)) &&
                            (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                            cqe->checksum == cpu_to_be16(0xffff)) {
                                bool l2_tunnel;

                                l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
                                        (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
                                ip_summed = CHECKSUM_UNNECESSARY;
                                hash_type = PKT_HASH_TYPE_L4;
                                if (l2_tunnel)
                                        skb->csum_level = 1;
                                ring->csum_ok++;
                        } else {
                                if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
                                      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
                                        goto csum_none;
                                if (check_csum(cqe, skb, va, dev->features))
                                        goto csum_none;
                                ip_summed = CHECKSUM_COMPLETE;
                                hash_type = PKT_HASH_TYPE_L3;
                                ring->csum_complete++;
                        }
                } else {
csum_none:
                        ip_summed = CHECKSUM_NONE;
                        hash_type = PKT_HASH_TYPE_L3;
                        ring->csum_none++;
                }
                skb->ip_summed = ip_summed;
                if (dev->features & NETIF_F_RXHASH)
                        skb_set_hash(skb,
                                     be32_to_cpu(cqe->immed_rss_invalid),
                                     hash_type);

                if ((cqe->vlan_my_qpn &
                     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
                    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               be16_to_cpu(cqe->sl_vid));
                else if ((cqe->vlan_my_qpn &
                          cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
                         (dev->features & NETIF_F_HW_VLAN_STAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
                                               be16_to_cpu(cqe->sl_vid));

                nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
                if (likely(nr)) {
                        skb_shinfo(skb)->nr_frags = nr;
                        skb->len = length;
                        skb->data_len = length;
                        napi_gro_frags(&cq->napi);
                } else {
                        __vlan_hwaccel_clear_tag(skb);
                        skb_clear_hash(skb);
                }
next:
                ++cq->mcq.cons_index;
                index = (cq->mcq.cons_index) & ring->size_mask;
                cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
                if (unlikely(++polled == budget))
                        break;
        }

        if (xdp_redir_flush)
                xdp_do_flush();

        if (likely(polled)) {
                if (doorbell_pending) {
                        priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
                        mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
                }

                mlx4_cq_set_ci(&cq->mcq);
                wmb(); /* ensure HW sees CQ consumer before we post new buffers */
                ring->cons = cq->mcq.cons_index;
        }

        mlx4_en_refill_rx_buffers(priv, ring);

        return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);

        if (likely(priv->port_up))
                napi_schedule_irqoff(&cq->napi);
        else
                mlx4_en_arm_cq(priv, cq);
}
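/* The RX NAPI poll first drains any pending XDP_TX completions for this
 * ring (xdp_busy) so transmit resources are reclaimed before new RX work is
 * processed; if either side still has work left, the poll keeps running by
 * returning the full budget.
 */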
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *xdp_tx_cq = NULL;
        bool clean_complete = true;
        int done;

        if (priv->tx_ring_num[TX_XDP]) {
                xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
                if (xdp_tx_cq->xdp_busy) {
                        clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
                                                               budget) < budget;
                        xdp_tx_cq->xdp_busy = !clean_complete;
                }
        }

        done = mlx4_en_process_rx_cq(dev, cq, budget);

        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget || !clean_complete) {
                int cpu_curr;

                /* in case we got here because of !clean_complete */
                done = budget;

                cpu_curr = smp_processor_id();

                if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
                        return budget;

                /* The current CPU does not match the IRQ affinity -
                 * affinity has probably changed, so stop this NAPI
                 * poll and restart it on the right CPU.
                 * Try to avoid returning a too-small value (like 0),
                 * so as not to fool net_rx_action() and its
                 * netdev_budget logic.
                 */
        }

        if (likely(napi_complete_done(napi, done)))
                mlx4_en_arm_cq(priv, cq);
        return done;
}
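/* Worked example for the non-XDP branch below (an illustration assuming
 * 4 KiB pages and 64-byte cache lines): a 1500-byte MTU gives an effective
 * MTU of 1522 once the Ethernet header and VLAN overhead are added, so a
 * single 1522-byte fragment is used. frag_stride = ALIGN(1522, 64) = 1536,
 * two such frames fit in a page, pad = (4096 - 2 * 1536) / 2 = 512, and the
 * final stride (truesize per frame) becomes 1536 + 512 = 2048 bytes.
 */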
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
        int i = 0;

        /* bpf requires buffers to be set up as 1 packet per page.
         * This only works when num_frags == 1.
         */
        if (priv->tx_ring_num[TX_XDP]) {
                priv->frag_info[0].frag_size = eff_mtu;
                /* This will gain efficient xdp frame recycling at the
                 * expense of more costly truesize accounting
                 */
                priv->frag_info[0].frag_stride = PAGE_SIZE;
                priv->dma_dir = DMA_BIDIRECTIONAL;
                priv->rx_headroom = XDP_PACKET_HEADROOM;
                i = 1;
        } else {
                int frag_size_max = 2048, buf_size = 0;

                /* should not happen, right ? */
                if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
                        frag_size_max = PAGE_SIZE;

                while (buf_size < eff_mtu) {
                        int frag_stride, frag_size = eff_mtu - buf_size;
                        int pad, nb;

                        if (i < MLX4_EN_MAX_RX_FRAGS - 1)
                                frag_size = min(frag_size, frag_size_max);

                        priv->frag_info[i].frag_size = frag_size;

                        frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
                        /* We can only pack two 1536-byte frames on a 4K page.
                         * Therefore, each frame would consume more bytes (truesize).
                         */
                        nb = PAGE_SIZE / frag_stride;
                        pad = (PAGE_SIZE - nb * frag_stride) / nb;
                        pad &= ~(SMP_CACHE_BYTES - 1);
                        priv->frag_info[i].frag_stride = frag_stride + pad;

                        buf_size += frag_size;
                        i++;
                }
                priv->dma_dir = DMA_FROM_DEVICE;
                priv->rx_headroom = 0;
        }

        priv->num_frags = i;
        priv->rx_skb_size = eff_mtu;
        priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

        en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
               eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_dbg(DRV, priv,
                       "  frag:%d - size:%d stride:%d\n",
                       i,
                       priv->frag_info[i].frag_size,
                       priv->frag_info[i].frag_stride);
        }
}
/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
                                 struct mlx4_en_rx_ring *ring,
                                 enum mlx4_qp_state *state,
                                 struct mlx4_qp *qp)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_qp_context *context;
        int err = 0;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        err = mlx4_qp_alloc(mdev->dev, qpn, qp);
        if (err) {
                en_err(priv, "Failed to allocate qp #%x\n", qpn);
                goto out;
        }
        qp->event = mlx4_en_sqp_event;

        mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
                                qpn, ring->cqn, -1, context);
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

        /* Cancel FCS removal if FW allows */
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
                context->param3 |= cpu_to_be32(1 << 29);
                if (priv->dev->features & NETIF_F_RXFCS)
                        ring->fcs_del = 0;
                else
                        ring->fcs_del = ETH_FCS_LEN;
        } else
                ring->fcs_del = 0;

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
        if (err) {
                mlx4_qp_remove(mdev->dev, qp);
                mlx4_qp_free(mdev->dev, qp);
        }
        mlx4_en_update_rx_prod_db(ring);
out:
        kfree(context);
        return err;
}
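/* The drop QP is a plain QP that is never attached to any RX ring; it is
 * used as the destination of flow-steering rules whose action is to discard
 * traffic (for example ethtool ntuple rules with a drop target), so matching
 * packets are consumed by hardware without reaching the stack.
 */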
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
        int err;
        u32 qpn;

        err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
                                    MLX4_RESERVE_A0_QP,
                                    MLX4_RES_USAGE_DRIVER);
        if (err) {
                en_err(priv, "Failed reserving drop qpn\n");
                return err;
        }
        err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
        if (err) {
                en_err(priv, "Failed allocating drop qp\n");
                mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
                return err;
        }

        return 0;
}
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
        u32 qpn;

        qpn = priv->drop_qp.qpn;
        mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
        mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
        mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
        struct mlx4_rss_context *rss_context;
        int rss_rings;
        void *ptr;
        u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
                       MLX4_RSS_TCP_IPV6);
        int i;
        int err = 0;
        int good_qps = 0;
        u32 qpn;
        int flags;

        en_dbg(DRV, priv, "Configuring rss steering\n");

        flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
        err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
                                    priv->rx_ring_num,
                                    &rss_map->base_qpn, flags,
                                    MLX4_RES_USAGE_DRIVER);
        if (err) {
                en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
                return err;
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                qpn = rss_map->base_qpn + i;
                err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
                                            &rss_map->state[i],
                                            &rss_map->qps[i]);
                if (err)
                        goto rss_err;

                ++good_qps;
        }

        if (priv->rx_ring_num == 1) {
                rss_map->indir_qp = &rss_map->qps[0];
                priv->base_qpn = rss_map->indir_qp->qpn;
                en_info(priv, "Optimized Non-RSS steering\n");
                return 0;
        }

        rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
        if (!rss_map->indir_qp) {
                err = -ENOMEM;
                goto rss_err;
        }

        /* Configure RSS indirection qp */
        err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
        if (err) {
                en_err(priv, "Failed to allocate RSS indirection QP\n");
                goto qp_alloc_err;
        }

        rss_map->indir_qp->event = mlx4_en_sqp_event;
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0]->cqn, -1, &context);

        if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
                rss_rings = priv->rx_ring_num;
        else
                rss_rings = priv->prof->rss_rings;

        ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
                                  + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
        rss_context = ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
                                            (rss_map->base_qpn));
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        if (priv->mdev->profile.udp_rss) {
                rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
                rss_context->base_qpn_udp = rss_context->default_qpn;
        }

        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
                en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
                rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
        }

        rss_context->flags = rss_mask;
        rss_context->hash_fn = MLX4_RSS_HASH_TOP;
        if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
                rss_context->hash_fn = MLX4_RSS_HASH_XOR;
        } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
                rss_context->hash_fn = MLX4_RSS_HASH_TOP;
                memcpy(rss_context->rss_key, priv->rss_key,
                       MLX4_EN_RSS_KEY_SIZE);
        } else {
                en_err(priv, "Unknown RSS hash function requested\n");
                err = -EINVAL;
                goto indir_err;
        }

        err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
                               rss_map->indir_qp, &rss_map->indir_state);
        if (err)
                goto indir_err;

        return 0;

indir_err:
        mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, rss_map->indir_qp);
qp_alloc_err:
        kfree(rss_map->indir_qp);
        rss_map->indir_qp = NULL;
rss_err:
        for (i = 0; i < good_qps; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
        }
        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
        return err;
}
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        int i;

        if (priv->rx_ring_num > 1) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                               MLX4_QP_STATE_RST, NULL, 0, 0,
                               rss_map->indir_qp);
                mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
                mlx4_qp_free(mdev->dev, rss_map->indir_qp);
                kfree(rss_map->indir_qp);
                rss_map->indir_qp = NULL;
        }

        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
        }
        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}