drivers/net/ethernet/google/gve/gve_ethtool.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static void gve_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *info)
{
        struct gve_priv *priv = netdev_priv(netdev);

        strscpy(info->driver, "gve", sizeof(info->driver));
        strscpy(info->version, gve_version_str, sizeof(info->version));
        strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
        struct gve_priv *priv = netdev_priv(netdev);

        priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
        struct gve_priv *priv = netdev_priv(netdev);

        return priv->msg_enable;
}

static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
        "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
        "rx_dropped", "tx_dropped", "tx_timeouts",
        "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
        "interface_up_cnt", "interface_down_cnt", "reset_cnt",
        "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
        "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
        "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
        "rx_frag_alloc_cnt[%u]",
        "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
        "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
        "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
        "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
        "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
        "tx_dma_mapping_error[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
        "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
        "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
        "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
        "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
        "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
        "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
        "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
        "report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

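/* Emit the ethtool stat name strings: the fixed main stats first, then the
 * per-queue RX and TX templates (the "[%u]" placeholder is filled in with
 * the queue index), then the admin queue stats. The ordering must match the
 * values written by gve_get_ethtool_stats().
 */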
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        struct gve_priv *priv = netdev_priv(netdev);
        char *s = (char *)data;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(s, *gve_gstrings_main_stats,
                       sizeof(gve_gstrings_main_stats));
                s += sizeof(gve_gstrings_main_stats);

                for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                        for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
                                snprintf(s, ETH_GSTRING_LEN,
                                         gve_gstrings_rx_stats[j], i);
                                s += ETH_GSTRING_LEN;
                        }
                }

                for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                        for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
                                snprintf(s, ETH_GSTRING_LEN,
                                         gve_gstrings_tx_stats[j], i);
                                s += ETH_GSTRING_LEN;
                        }
                }

                memcpy(s, *gve_gstrings_adminq_stats,
                       sizeof(gve_gstrings_adminq_stats));
                s += sizeof(gve_gstrings_adminq_stats);
                break;

        case ETH_SS_PRIV_FLAGS:
                memcpy(s, *gve_gstrings_priv_flags,
                       sizeof(gve_gstrings_priv_flags));
                s += sizeof(gve_gstrings_priv_flags);
                break;

        default:
                break;
        }
}

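/* Return the number of strings/values in each supported string set; the
 * counts must stay in sync with the layout produced by gve_get_strings().
 */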
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
        struct gve_priv *priv = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
                       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
                       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
        case ETH_SS_PRIV_FLAGS:
                return GVE_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

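/* Fill @data in the order declared by gve_get_strings(): aggregated main
 * stats, per-RX-queue counters, per-TX-queue counters (folding in the
 * NIC-reported stats from the shared stats report when the device has
 * written them), and finally the admin queue counters.
 */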
static void
gve_get_ethtool_stats(struct net_device *netdev,
                      struct ethtool_stats *stats, u64 *data)
{
        u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
                tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
                tmp_tx_pkts, tmp_tx_bytes;
        u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
                rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
        int stats_idx, base_stats_idx, max_stats_idx;
        struct stats *report_stats;
        int *rx_qid_to_stats_idx;
        int *tx_qid_to_stats_idx;
        struct gve_priv *priv;
        bool skip_nic_stats;
        unsigned int start;
        int ring;
        int i, j;

        ASSERT_RTNL();

        priv = netdev_priv(netdev);
        report_stats = priv->stats_report->stats;
        rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
                                            sizeof(int), GFP_KERNEL);
        if (!rx_qid_to_stats_idx)
                return;
        tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
                                            sizeof(int), GFP_KERNEL);
        if (!tx_qid_to_stats_idx) {
                kfree(rx_qid_to_stats_idx);
                return;
        }
        for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
             rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
             ring < priv->rx_cfg.num_queues; ring++) {
                if (priv->rx) {
                        do {
                                struct gve_rx_ring *rx = &priv->rx[ring];

                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
                                tmp_rx_pkts = rx->rpackets;
                                tmp_rx_bytes = rx->rbytes;
                                tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
                                tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
                                tmp_rx_desc_err_dropped_pkt =
                                        rx->rx_desc_err_dropped_pkt;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                        rx_pkts += tmp_rx_pkts;
                        rx_bytes += tmp_rx_bytes;
                        rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
                        rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
                        rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
                }
        }
        for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
             ring < priv->tx_cfg.num_queues; ring++) {
                if (priv->tx) {
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                tmp_tx_pkts = priv->tx[ring].pkt_done;
                                tmp_tx_bytes = priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                        tx_pkts += tmp_tx_pkts;
                        tx_bytes += tmp_tx_bytes;
                        tx_dropped += priv->tx[ring].dropped_pkt;
                }
        }

        i = 0;
        data[i++] = rx_pkts;
        data[i++] = tx_pkts;
        data[i++] = rx_bytes;
        data[i++] = tx_bytes;
        /* total rx dropped packets */
        data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
                    rx_desc_err_dropped_pkt;
        data[i++] = tx_dropped;
        data[i++] = priv->tx_timeo_cnt;
        data[i++] = rx_skb_alloc_fail;
        data[i++] = rx_buf_alloc_fail;
        data[i++] = rx_desc_err_dropped_pkt;
        data[i++] = priv->interface_up_cnt;
        data[i++] = priv->interface_down_cnt;
        data[i++] = priv->reset_cnt;
        data[i++] = priv->page_alloc_fail;
        data[i++] = priv->dma_mapping_error;
        data[i++] = priv->stats_report_trigger_cnt;
        i = GVE_MAIN_STATS_LEN;

        /* For rx cross-reporting stats, start from nic rx stats in report */
        base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
                GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
        max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
                base_stats_idx;
        /* Preprocess the stats report for rx, map queue id to start index */
        skip_nic_stats = false;
        for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
                stats_idx += NIC_RX_STATS_REPORT_NUM) {
                u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
                u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

                if (stat_name == 0) {
                        /* no stats written by NIC yet */
                        skip_nic_stats = true;
                        break;
                }
                rx_qid_to_stats_idx[queue_id] = stats_idx;
        }
        /* walk RX rings */
        if (priv->rx) {
                for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
                        struct gve_rx_ring *rx = &priv->rx[ring];

                        data[i++] = rx->fill_cnt;
                        data[i++] = rx->cnt;
                        data[i++] = rx->fill_cnt - rx->cnt;
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
                                tmp_rx_bytes = rx->rbytes;
                                tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
                                tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
                                tmp_rx_desc_err_dropped_pkt =
                                        rx->rx_desc_err_dropped_pkt;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                        data[i++] = tmp_rx_bytes;
                        data[i++] = rx->rx_cont_packet_cnt;
                        data[i++] = rx->rx_frag_flip_cnt;
                        data[i++] = rx->rx_frag_copy_cnt;
                        data[i++] = rx->rx_frag_alloc_cnt;
                        /* rx dropped packets */
                        data[i++] = tmp_rx_skb_alloc_fail +
                                tmp_rx_buf_alloc_fail +
                                tmp_rx_desc_err_dropped_pkt;
                        data[i++] = rx->rx_copybreak_pkt;
                        data[i++] = rx->rx_copied_pkt;
                        /* stats from NIC */
                        if (skip_nic_stats) {
                                /* skip NIC rx stats */
                                i += NIC_RX_STATS_REPORT_NUM;
                                continue;
                        }
                        for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
                                u64 value =
                                be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

                                data[i++] = value;
                        }
                }
        } else {
                i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
        }

        /* For tx cross-reporting stats, start from nic tx stats in report */
        base_stats_idx = max_stats_idx;
        max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
                max_stats_idx;
        /* Preprocess the stats report for tx, map queue id to start index */
        skip_nic_stats = false;
        for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
                stats_idx += NIC_TX_STATS_REPORT_NUM) {
                u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
                u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

                if (stat_name == 0) {
                        /* no stats written by NIC yet */
                        skip_nic_stats = true;
                        break;
                }
                tx_qid_to_stats_idx[queue_id] = stats_idx;
        }
        /* walk TX rings */
        if (priv->tx) {
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        struct gve_tx_ring *tx = &priv->tx[ring];

                        if (gve_is_gqi(priv)) {
                                data[i++] = tx->req;
                                data[i++] = tx->done;
                                data[i++] = tx->req - tx->done;
                        } else {
                                /* DQO doesn't currently support
                                 * posted/completed descriptor counts;
                                 */
                                data[i++] = 0;
                                data[i++] = 0;
                                data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
                        }
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                tmp_tx_bytes = tx->bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                        data[i++] = tmp_tx_bytes;
                        data[i++] = tx->wake_queue;
                        data[i++] = tx->stop_queue;
                        data[i++] = gve_tx_load_event_counter(priv, tx);
                        data[i++] = tx->dma_mapping_error;
                        /* stats from NIC */
                        if (skip_nic_stats) {
                                /* skip NIC tx stats */
                                i += NIC_TX_STATS_REPORT_NUM;
                                continue;
                        }
                        for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
                                u64 value =
                                be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
                                data[i++] = value;
                        }
                }
        } else {
                i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
        }

        kfree(rx_qid_to_stats_idx);
        kfree(tx_qid_to_stats_idx);
        /* AQ Stats */
        data[i++] = priv->adminq_prod_cnt;
        data[i++] = priv->adminq_cmd_fail;
        data[i++] = priv->adminq_timeouts;
        data[i++] = priv->adminq_describe_device_cnt;
        data[i++] = priv->adminq_cfg_device_resources_cnt;
        data[i++] = priv->adminq_register_page_list_cnt;
        data[i++] = priv->adminq_unregister_page_list_cnt;
        data[i++] = priv->adminq_create_tx_queue_cnt;
        data[i++] = priv->adminq_create_rx_queue_cnt;
        data[i++] = priv->adminq_destroy_tx_queue_cnt;
        data[i++] = priv->adminq_destroy_rx_queue_cnt;
        data[i++] = priv->adminq_dcfg_device_resources_cnt;
        data[i++] = priv->adminq_set_driver_parameter_cnt;
        data[i++] = priv->adminq_report_stats_cnt;
        data[i++] = priv->adminq_report_link_speed_cnt;
}

static void gve_get_channels(struct net_device *netdev,
                             struct ethtool_channels *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);

        cmd->max_rx = priv->rx_cfg.max_queues;
        cmd->max_tx = priv->tx_cfg.max_queues;
        cmd->max_other = 0;
        cmd->max_combined = 0;
        cmd->rx_count = priv->rx_cfg.num_queues;
        cmd->tx_count = priv->tx_cfg.num_queues;
        cmd->other_count = 0;
        cmd->combined_count = 0;
}

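/* Change the number of RX/TX queues. If the carrier is down only the
 * configured counts are updated; otherwise the queues are reallocated with
 * the new configuration via gve_adjust_queues().
 */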
static int gve_set_channels(struct net_device *netdev,
                            struct ethtool_channels *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);
        struct gve_queue_config new_tx_cfg = priv->tx_cfg;
        struct gve_queue_config new_rx_cfg = priv->rx_cfg;
        struct ethtool_channels old_settings;
        int new_tx = cmd->tx_count;
        int new_rx = cmd->rx_count;

        gve_get_channels(netdev, &old_settings);

        /* Changing combined is not allowed */
        if (cmd->combined_count != old_settings.combined_count)
                return -EINVAL;

        if (!new_rx || !new_tx)
                return -EINVAL;

        if (!netif_carrier_ok(netdev)) {
                priv->tx_cfg.num_queues = new_tx;
                priv->rx_cfg.num_queues = new_rx;
                return 0;
        }

        new_tx_cfg.num_queues = new_tx;
        new_rx_cfg.num_queues = new_rx;

        return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *cmd,
                              struct kernel_ethtool_ringparam *kernel_cmd,
                              struct netlink_ext_ack *extack)
{
        struct gve_priv *priv = netdev_priv(netdev);

        cmd->rx_max_pending = priv->rx_desc_cnt;
        cmd->tx_max_pending = priv->tx_desc_cnt;
        cmd->rx_pending = priv->rx_desc_cnt;
        cmd->tx_pending = priv->tx_desc_cnt;
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
        struct gve_priv *priv = netdev_priv(netdev);

        if (*flags == ETH_RESET_ALL) {
                *flags = 0;
                return gve_reset(priv, true);
        }

        return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *etuna, void *value)
{
        struct gve_priv *priv = netdev_priv(netdev);

        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)value = priv->rx_copybreak;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

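/* Only ETHTOOL_RX_COPYBREAK can be tuned; the copybreak length is capped by
 * the receive buffer size (half a page for GQI, the DQO data buffer size
 * otherwise).
 */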
static int gve_set_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *etuna,
                           const void *value)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u32 len;

        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
        {
                u32 max_copybreak = gve_is_gqi(priv) ?
                        (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;

                len = *(u32 *)value;
                if (len > max_copybreak)
                        return -EINVAL;
                priv->rx_copybreak = len;
                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u32 ret_flags = 0;

        /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
        if (priv->ethtool_flags & BIT(0))
                ret_flags |= BIT(0);
        return ret_flags;
}

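/* Toggle the single "report-stats" private flag: arm the stats report timer
 * when the flag is turned on, and zero the shared stats report and delete
 * the timer when it is turned off.
 */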
static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u64 ori_flags, new_flags;

        ori_flags = READ_ONCE(priv->ethtool_flags);
        new_flags = ori_flags;

        /* Only one priv flag exists: report-stats (BIT(0)) */
        if (flags & BIT(0))
                new_flags |= BIT(0);
        else
                new_flags &= ~(BIT(0));
        priv->ethtool_flags = new_flags;
        /* start report-stats timer when user turns report stats on. */
        if (flags & BIT(0)) {
                mod_timer(&priv->stats_report_timer,
                          round_jiffies(jiffies +
                                        msecs_to_jiffies(priv->stats_report_timer_period)));
        }
        /* Zero out gve stats when report-stats is turned off and delete the
         * report stats timer.
         */
        if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
                int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
                        priv->tx_cfg.num_queues;
                int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
                        priv->rx_cfg.num_queues;

                memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
                                   sizeof(struct stats));
                del_timer_sync(&priv->stats_report_timer);
        }
        return 0;
}

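/* Ask the device for the current link speed over the admin queue and report
 * the cached value in the link settings.
 */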
static int gve_get_link_ksettings(struct net_device *netdev,
                                  struct ethtool_link_ksettings *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);
        int err = gve_adminq_report_link_speed(priv);

        cmd->base.speed = priv->link_speed;
        return err;
}

static int gve_get_coalesce(struct net_device *netdev,
                            struct ethtool_coalesce *ec,
                            struct kernel_ethtool_coalesce *kernel_ec,
                            struct netlink_ext_ack *extack)
{
        struct gve_priv *priv = netdev_priv(netdev);

        if (gve_is_gqi(priv))
                return -EOPNOTSUPP;
        ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
        ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

        return 0;
}

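/* Update the interrupt coalescing intervals (DQO only) and reprogram the ITR
 * of every TX/RX notification block whose value changed.
 */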
static int gve_set_coalesce(struct net_device *netdev,
                            struct ethtool_coalesce *ec,
                            struct kernel_ethtool_coalesce *kernel_ec,
                            struct netlink_ext_ack *extack)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u32 tx_usecs_orig = priv->tx_coalesce_usecs;
        u32 rx_usecs_orig = priv->rx_coalesce_usecs;
        int idx;

        if (gve_is_gqi(priv))
                return -EOPNOTSUPP;

        if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
            ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
                return -EINVAL;
        priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
        priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

        if (tx_usecs_orig != priv->tx_coalesce_usecs) {
                for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                        int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
                        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                        gve_set_itr_coalesce_usecs_dqo(priv, block,
                                                       priv->tx_coalesce_usecs);
                }
        }

        if (rx_usecs_orig != priv->rx_coalesce_usecs) {
                for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                        int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                        gve_set_itr_coalesce_usecs_dqo(priv, block,
                                                       priv->rx_coalesce_usecs);
                }
        }

        return 0;
}

const struct ethtool_ops gve_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
        .get_drvinfo = gve_get_drvinfo,
        .get_strings = gve_get_strings,
        .get_sset_count = gve_get_sset_count,
        .get_ethtool_stats = gve_get_ethtool_stats,
        .set_msglevel = gve_set_msglevel,
        .get_msglevel = gve_get_msglevel,
        .set_channels = gve_set_channels,
        .get_channels = gve_get_channels,
        .get_link = ethtool_op_get_link,
        .get_coalesce = gve_get_coalesce,
        .set_coalesce = gve_set_coalesce,
        .get_ringparam = gve_get_ringparam,
        .reset = gve_user_reset,
        .get_tunable = gve_get_tunable,
        .set_tunable = gve_set_tunable,
        .get_priv_flags = gve_get_priv_flags,
        .set_priv_flags = gve_set_priv_flags,
        .get_link_ksettings = gve_get_link_ksettings
};