virtio_net: skip RCU read lock by checking xdp_enabled of vi
authorLi RongQing <lirongqing@baidu.com>
Sat, 9 Oct 2021 09:32:43 +0000 (17:32 +0800)
committerDavid S. Miller <davem@davemloft.net>
Sun, 10 Oct 2021 10:31:09 +0000 (11:31 +0100)
A networking benchmark shows that __rcu_read_lock and
__rcu_read_unlock take some CPU cycles; we can avoid
calling them in part of the virtio rx path by checking
xdp_enabled of vi, since XDP is disabled most of the time

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/virtio_net.c

index 2ed49884565f0f69bb3cf0c277ccc8b6cd5091b6..74d2be438180eb08be9344874c53ed3bb1fd7945 100644 (file)
@@ -734,6 +734,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
                dev->stats.rx_length_errors++;
                goto err_len;
        }
+
+       if (likely(!vi->xdp_enabled)) {
+               xdp_prog = NULL;
+               goto skip_xdp;
+       }
+
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
@@ -816,6 +822,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
        }
        rcu_read_unlock();
 
+skip_xdp:
        skb = build_skb(buf, buflen);
        if (!skb) {
                put_page(page);
@@ -897,6 +904,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                dev->stats.rx_length_errors++;
                goto err_skb;
        }
+
+       if (likely(!vi->xdp_enabled)) {
+               xdp_prog = NULL;
+               goto skip_xdp;
+       }
+
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
@@ -1024,6 +1037,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        }
        rcu_read_unlock();
 
+skip_xdp:
        head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
                               metasize, headroom);
        curr_skb = head_skb;