gve: convert to use netmem for DQO RDA mode
author Harshitha Ramamurthy <hramamurthy@google.com>
Fri, 7 Mar 2025 00:39:05 +0000 (00:39 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 8 Mar 2025 03:29:44 +0000 (19:29 -0800)
To add netmem support to the gve driver, add a union to
struct gve_rx_slot_page_info. netmem_ref is used for the DQO queue
format's raw DMA addressing (RDA) mode; struct page is retained for
all other use cases.

Then, switch to the relevant netmem helper functions for page pool
and skb frag management.
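
For readers new to the API: a netmem_ref is an opaque buffer handle
that may be backed either by a regular struct page or by a net_iov
(for example, unreadable device memory), so it is only dereferenced
through the netmem helpers. A rough sketch of the pattern this patch
converts to, using only the helpers exercised below; names such as
pool, info, dma_addr and allow_direct are placeholders, not the exact
gve code:

	netmem_ref netmem;

	/* Allocate a buffer fragment from the page pool as a netmem_ref. */
	netmem = page_pool_alloc_netmem(pool, &info->page_offset,
					&info->buf_size, GFP_ATOMIC);
	if (!netmem)
		return -ENOMEM;

	info->netmem = netmem;
	info->page_address = netmem_address(netmem);
	dma_addr = page_pool_get_dma_addr_netmem(netmem);

	/* CPU access only makes sense for page-backed netmem. */
	if (!netmem_is_net_iov(netmem))
		prefetch(netmem_to_page(netmem));

	/* On free, hand the buffer back to its owning page pool. */
	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);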

Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Link: https://patch.msgid.link/20250307003905.601175-1-hramamurthy@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 216d6e157befbf8f578c15d5e63ce5db900bc6b4..483c43bab3a999fb504430202ec3d13e75f98af0 100644
@@ -105,7 +105,13 @@ struct gve_rx_desc_queue {
 
 /* The page info for a single slot in the RX data queue */
 struct gve_rx_slot_page_info {
-       struct page *page;
+       /* netmem is used for DQO RDA mode
+        * page is used in all other modes
+        */
+       union {
+               struct page *page;
+               netmem_ref netmem;
+       };
        void *page_address;
        u32 page_offset; /* offset to write to in page */
        unsigned int buf_size;
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6698922d3c20559106265e195ee67..af84cb88f828c1dde46e8e77baf23e94307e27ec 100644
@@ -205,32 +205,33 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
                           struct gve_rx_buf_state_dqo *buf_state,
                           bool allow_direct)
 {
-       struct page *page = buf_state->page_info.page;
+       netmem_ref netmem = buf_state->page_info.netmem;
 
-       if (!page)
+       if (!netmem)
                return;
 
-       page_pool_put_full_page(page->pp, page, allow_direct);
-       buf_state->page_info.page = NULL;
+       page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+       buf_state->page_info.netmem = 0;
 }
 
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
                                    struct gve_rx_buf_state_dqo *buf_state)
 {
        struct gve_priv *priv = rx->gve;
-       struct page *page;
+       netmem_ref netmem;
 
        buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
-       page = page_pool_alloc(rx->dqo.page_pool,
-                              &buf_state->page_info.page_offset,
-                              &buf_state->page_info.buf_size, GFP_ATOMIC);
+       netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+                                       &buf_state->page_info.page_offset,
+                                       &buf_state->page_info.buf_size,
+                                       GFP_ATOMIC);
 
-       if (!page)
+       if (!netmem)
                return -ENOMEM;
 
-       buf_state->page_info.page = page;
-       buf_state->page_info.page_address = page_address(page);
-       buf_state->addr = page_pool_get_dma_addr(page);
+       buf_state->page_info.netmem = netmem;
+       buf_state->page_info.page_address = netmem_address(netmem);
+       buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
 
        return 0;
 }
@@ -269,7 +270,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
                      struct gve_rx_buf_state_dqo *buf_state)
 {
        if (rx->dqo.page_pool) {
-               buf_state->page_info.page = NULL;
+               buf_state->page_info.netmem = 0;
                gve_free_buf_state(rx, buf_state);
        } else {
                gve_dec_pagecnt_bias(&buf_state->page_info);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f0674a4435670851feb64e5f3e407ccc87dd3be1..856ade0c209f6fdd2fe7c5d625eb325a69869a7b 100644
@@ -476,6 +476,24 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
        return 0;
 }
 
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+                               struct gve_rx_buf_state_dqo *buf_state,
+                               int num_frags, u16 buf_len)
+{
+       if (rx->dqo.page_pool) {
+               skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+                                      buf_state->page_info.netmem,
+                                      buf_state->page_info.page_offset,
+                                      buf_len,
+                                      buf_state->page_info.buf_size);
+       } else {
+               skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+                               buf_state->page_info.page,
+                               buf_state->page_info.page_offset,
+                               buf_len, buf_state->page_info.buf_size);
+       }
+}
+
 /* Chains multi skbs for single rx packet.
  * Returns 0 if buffer is appended, -1 otherwise.
  */
@@ -513,10 +531,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
        if (gve_rx_should_trigger_copy_ondemand(rx))
                return gve_rx_copy_ondemand(rx, buf_state, buf_len);
 
-       skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
-                       buf_state->page_info.page,
-                       buf_state->page_info.page_offset,
-                       buf_len, buf_state->page_info.buf_size);
+       gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
        gve_reuse_buffer(rx, buf_state);
        return 0;
 }
@@ -561,7 +576,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
        /* Page might have not been used for awhile and was likely last written
         * by a different thread.
         */
-       prefetch(buf_state->page_info.page);
+       if (rx->dqo.page_pool) {
+               if (!netmem_is_net_iov(buf_state->page_info.netmem))
+                       prefetch(netmem_to_page(buf_state->page_info.netmem));
+       } else {
+               prefetch(buf_state->page_info.page);
+       }
 
        /* Copy the header into the skb in the case of header split */
        if (hsplit) {
@@ -632,9 +652,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
        if (rx->dqo.page_pool)
                skb_mark_for_recycle(rx->ctx.skb_head);
 
-       skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
-                       buf_state->page_info.page_offset, buf_len,
-                       buf_state->page_info.buf_size);
+       gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
        gve_reuse_buffer(rx, buf_state);
        return 0;