gve: add support to change ring size via ethtool
author Harshitha Ramamurthy <hramamurthy@google.com>
Mon, 1 Apr 2024 23:45:30 +0000 (23:45 +0000)
committer David S. Miller <davem@davemloft.net>
Wed, 3 Apr 2024 10:11:15 +0000 (11:11 +0100)
Allow the user to change ring size via ethtool if
supported by the device. The driver relies on the
ring size ranges queried from device to validate
ring sizes requested by the user.

Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/google/gve/gve_main.c

index 669cacdae4f4f02390ecb5a8cd77fa62600ab9c3..e97633b68e2551070ec5cee09ac6746d27ae1a98 100644 (file)
@@ -1159,6 +1159,14 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+                            struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+                            struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+                            struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+                     struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+                     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+                     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
                      struct gve_queue_config new_rx_config,
                      struct gve_queue_config new_tx_config);
index dbe05402d40b14acd9dc6b62562d8d63bdc7fa57..815dead31673f8cbfe8541f8183ad18304bb4a3c 100644 (file)
@@ -490,8 +490,8 @@ static void gve_get_ringparam(struct net_device *netdev,
 {
        struct gve_priv *priv = netdev_priv(netdev);
 
-       cmd->rx_max_pending = priv->rx_desc_cnt;
-       cmd->tx_max_pending = priv->tx_desc_cnt;
+       cmd->rx_max_pending = priv->max_rx_desc_cnt;
+       cmd->tx_max_pending = priv->max_tx_desc_cnt;
        cmd->rx_pending = priv->rx_desc_cnt;
        cmd->tx_pending = priv->tx_desc_cnt;
 
@@ -503,20 +503,93 @@ static void gve_get_ringparam(struct net_device *netdev,
                kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 }
 
+static int gve_adjust_ring_sizes(struct gve_priv *priv,
+                                u16 new_tx_desc_cnt,
+                                u16 new_rx_desc_cnt)
+{
+       struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+       struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+       struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+       struct gve_qpl_config new_qpl_cfg;
+       int err;
+
+       /* get current queue configuration */
+       gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+                               &tx_alloc_cfg, &rx_alloc_cfg);
+
+       /* copy over the new ring_size from ethtool */
+       tx_alloc_cfg.ring_size = new_tx_desc_cnt;
+       rx_alloc_cfg.ring_size = new_rx_desc_cnt;
+
+       /* qpl_cfg is not read-only, it contains a map that gets updated as
+        * rings are allocated, which is why we cannot use the yet unreleased
+        * one in priv.
+        */
+       qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+       tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+       rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+       if (netif_running(priv->dev)) {
+               err = gve_adjust_config(priv, &qpls_alloc_cfg,
+                                       &tx_alloc_cfg, &rx_alloc_cfg);
+               if (err)
+                       return err;
+       }
+
+       /* Set new ring_size for the next up */
+       priv->tx_desc_cnt = new_tx_desc_cnt;
+       priv->rx_desc_cnt = new_rx_desc_cnt;
+
+       return 0;
+}
+
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+                                     u16 new_rx_desc_cnt)
+{
+       /* check for valid range */
+       if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+           new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+           new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+           new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+               dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+               return -EINVAL;
+       }
+
+       if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+               dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int gve_set_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *cmd,
                             struct kernel_ethtool_ringparam *kernel_cmd,
                             struct netlink_ext_ack *extack)
 {
        struct gve_priv *priv = netdev_priv(netdev);
+       u16 new_tx_cnt, new_rx_cnt;
+       int err;
+
+       err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+       if (err)
+               return err;
 
-       if (priv->tx_desc_cnt != cmd->tx_pending ||
-           priv->rx_desc_cnt != cmd->rx_pending) {
-               dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+       if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+               return 0;
+
+       if (!priv->modify_ring_size_enabled) {
+               dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
                return -EOPNOTSUPP;
        }
 
-       return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+       new_tx_cnt = cmd->tx_pending;
+       new_rx_cnt = cmd->rx_pending;
+
+       if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+               return -EINVAL;
+
+       return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
 }
 
 static int gve_user_reset(struct net_device *netdev, u32 *flags)
index 470447c0490fa8e203bd1bd60a5b3c45f847c6d0..a515e5af843cbc629882c95543cd87b53111488e 100644 (file)
@@ -1314,10 +1314,10 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
        cfg->rx = priv->rx;
 }
 
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
-                                   struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
-                                   struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
-                                   struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+                            struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+                            struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+                            struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
        gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
        gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
@@ -1867,10 +1867,10 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-static int gve_adjust_config(struct gve_priv *priv,
-                            struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
-                            struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
-                            struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_adjust_config(struct gve_priv *priv,
+                     struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+                     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+                     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
        int err;