29c5f994f92e0f0cb3d1c50937f10b937ac30e7c
[linux-2.6-block.git] / drivers / net / ethernet / google / gve / gve_main.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_dqo.h"
18 #include "gve_adminq.h"
19 #include "gve_register.h"
20
21 #define GVE_DEFAULT_RX_COPYBREAK        (256)
22
23 #define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
24 #define GVE_VERSION             "1.0.0"
25 #define GVE_VERSION_PREFIX      "GVE-"
26
27 const char gve_version_str[] = GVE_VERSION;
28 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
29
30 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
31 {
32         struct gve_priv *priv = netdev_priv(dev);
33
34         if (gve_is_gqi(priv))
35                 return gve_tx(skb, dev);
36         else
37                 return gve_tx_dqo(skb, dev);
38 }
39
40 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
41 {
42         struct gve_priv *priv = netdev_priv(dev);
43         unsigned int start;
44         int ring;
45
46         if (priv->rx) {
47                 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
48                         do {
49                                 start =
50                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
51                                 s->rx_packets += priv->rx[ring].rpackets;
52                                 s->rx_bytes += priv->rx[ring].rbytes;
53                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
54                                                        start));
55                 }
56         }
57         if (priv->tx) {
58                 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
59                         do {
60                                 start =
61                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
62                                 s->tx_packets += priv->tx[ring].pkt_done;
63                                 s->tx_bytes += priv->tx[ring].bytes_done;
64                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
65                                                        start));
66                 }
67         }
68 }
69
70 static int gve_alloc_counter_array(struct gve_priv *priv)
71 {
72         priv->counter_array =
73                 dma_alloc_coherent(&priv->pdev->dev,
74                                    priv->num_event_counters *
75                                    sizeof(*priv->counter_array),
76                                    &priv->counter_array_bus, GFP_KERNEL);
77         if (!priv->counter_array)
78                 return -ENOMEM;
79
80         return 0;
81 }
82
83 static void gve_free_counter_array(struct gve_priv *priv)
84 {
85         if (!priv->counter_array)
86                 return;
87
88         dma_free_coherent(&priv->pdev->dev,
89                           priv->num_event_counters *
90                           sizeof(*priv->counter_array),
91                           priv->counter_array, priv->counter_array_bus);
92         priv->counter_array = NULL;
93 }
94
95 /* NIC requests to report stats */
/* NIC requests to report stats */
/* Workqueue handler: push a stats report to the device when the
 * do-report flag is set, then clear the flag. Scheduled by
 * gve_stats_report_schedule().
 */
static void gve_stats_report_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);
	if (gve_get_do_report_stats(priv)) {
		gve_handle_report_stats(priv);
		gve_clear_do_report_stats(priv);
	}
}
105
106 static void gve_stats_report_schedule(struct gve_priv *priv)
107 {
108         if (!gve_get_probe_in_progress(priv) &&
109             !gve_get_reset_in_progress(priv)) {
110                 gve_set_do_report_stats(priv);
111                 queue_work(priv->gve_wq, &priv->stats_report_task);
112         }
113 }
114
/* Periodic timer callback: re-arm itself for the next period, then
 * schedule the stats-report work item.
 */
static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}
124
125 static int gve_alloc_stats_report(struct gve_priv *priv)
126 {
127         int tx_stats_num, rx_stats_num;
128
129         tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
130                        priv->tx_cfg.num_queues;
131         rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
132                        priv->rx_cfg.num_queues;
133         priv->stats_report_len = struct_size(priv->stats_report, stats,
134                                              tx_stats_num + rx_stats_num);
135         priv->stats_report =
136                 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
137                                    &priv->stats_report_bus, GFP_KERNEL);
138         if (!priv->stats_report)
139                 return -ENOMEM;
140         /* Set up timer for the report-stats task */
141         timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
142         priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
143         return 0;
144 }
145
/* Free the stats report buffer. The timer is stopped synchronously
 * first so its callback cannot touch the buffer after it is freed.
 */
static void gve_free_stats_report(struct gve_priv *priv)
{
	if (!priv->stats_report)
		return;

	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}
156
/* Management vector ISR: defer all work to the service task on the
 * driver workqueue.
 */
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}
164
/* Per-queue ISR for the GQI format: mask the vector via its doorbell
 * so it does not refire while NAPI runs, then schedule NAPI. The poll
 * routine re-enables the interrupt when it completes.
 */
static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}
174
/* Per-queue ISR for the DQO format: no explicit masking needed here
 * (per the comment below, the hardware masks automatically).
 */
static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}
183
/* NAPI poll for the GQI queue format.
 *
 * Polls the tx and/or rx ring attached to this notify block. Returning
 * @budget keeps NAPI scheduled; otherwise the irq is re-enabled via the
 * doorbell and a final recheck closes the race between unmasking and
 * new work arriving.
 */
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx)
		reschedule |= gve_tx_poll(block, budget);
	if (block->rx)
		reschedule |= gve_rx_poll(block, budget);

	if (reschedule)
		return budget;

	napi_complete(napi);
	irq_doorbell = gve_irq_doorbell(priv, block);
	/* ACK/EVENT write re-arms the vector — see gve_register.h for the
	 * doorbell bit semantics.
	 */
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

	/* Double check we have no extra work.
	 * Ensure unmask synchronizes with checking for work.
	 */
	mb();
	/* budget of -1 here: presumably a "peek, don't consume" probe of the
	 * rings — confirm against gve_tx_poll/gve_rx_poll.
	 */
	if (block->tx)
		reschedule |= gve_tx_poll(block, -1);
	if (block->rx)
		reschedule |= gve_rx_poll(block, -1);
	/* Work arrived after unmask: re-mask and let NAPI run again. */
	if (reschedule && napi_reschedule(napi))
		iowrite32be(GVE_IRQ_MASK, irq_doorbell);

	return 0;
}
219
/* NAPI poll for the DQO queue format.
 *
 * Unlike the GQI path, no post-unmask recheck is required because the
 * MSI-X PBA retriggers the interrupt for events that arrived while the
 * vector was masked (see the comments below).
 */
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_priv *priv = block->priv;
	bool reschedule = false;
	int work_done = 0;

	/* Clear PCI MSI-X Pending Bit Array (PBA)
	 *
	 * This bit is set if an interrupt event occurs while the vector is
	 * masked. If this bit is set and we reenable the interrupt, it will
	 * fire again. Since we're just about to poll the queue state, we don't
	 * need it to fire again.
	 *
	 * Under high softirq load, it's possible that the interrupt condition
	 * is triggered twice before we got the chance to process it.
	 */
	gve_write_irq_doorbell_dqo(priv, block,
				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);

	if (block->tx)
		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

	if (block->rx) {
		work_done = gve_rx_poll_dqo(block, budget);
		/* Consuming the full budget means the ring may hold more. */
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	if (likely(napi_complete_done(napi, work_done))) {
		/* Enable interrupts again.
		 *
		 * We don't need to repoll afterwards because HW supports the
		 * PCI MSI-X PBA feature.
		 *
		 * Another interrupt would be triggered if a new event came in
		 * since the last one.
		 */
		gve_write_irq_doorbell_dqo(priv, block,
					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
	}

	return work_done;
}
267
/* Allocate and wire up MSI-X vectors and notify blocks.
 *
 * Requests num_ntfy_blks queue vectors plus one management vector (the
 * last one). If fewer vectors are granted, the queue counts are scaled
 * down to fit. On any failure the function unwinds everything it set up
 * and returns a negative errno.
 */
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	char *name = priv->dev->name;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvzalloc(num_vecs_requested *
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		/* Reserve the last granted vector for management; round the
		 * remainder down to an even count split between tx and rx.
		 */
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->mgmt_msix_idx = priv->num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector  - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
		 name);
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->ntfy_blocks =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->ntfy_blocks),
				   &priv->ntfy_block_bus, GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}
	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
			 name, i);
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
				  0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		/* Spread queue vectors round-robin over the active CPUs. */
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
	}
	return 0;
abort_with_some_ntfy_blocks:
	/* Only the first i blocks got an irq; free exactly those. */
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}
374
/* Tear down everything gve_alloc_notify_blocks() set up, in reverse
 * order: queue irqs, management irq, notify block DMA memory, MSI-X,
 * and the vector table. Safe to call when nothing was allocated.
 */
static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	if (!priv->msix_vectors)
		return;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}
400
/* Allocate host-side resources (counters, notify blocks, stats report)
 * and register them with the device over the admin queue. For DQO RDA,
 * additionally fetch the packet-type lookup table. Unwinds on error
 * and returns a negative errno.
 */
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->ntfy_block_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		/* Adminq errors are mapped to -ENXIO for the caller. */
		err = -ENXIO;
		goto abort_with_stats_report;
	}

	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
					       GFP_KERNEL);
		if (!priv->ptype_lut_dqo) {
			err = -ENOMEM;
			goto abort_with_stats_report;
		}
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to get ptype map: err=%d\n", err);
			goto abort_with_ptype_lut;
		}
	}

	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	/* A stats-report failure is logged but does not fail setup. */
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;

abort_with_ptype_lut:
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);

	return err;
}
462
463 static void gve_trigger_reset(struct gve_priv *priv);
464
/* Detach resources from the device over the admin queue (triggering a
 * reset if the device refuses), then free the host-side allocations.
 */
static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}

	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;

	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}
495
/* Register @gve_poll as the NAPI handler for the given notify block.
 * NOTE(review): this netif_napi_add() signature takes an explicit
 * weight — matches pre-6.1 kernels; confirm against the target tree.
 */
static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
			 int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll,
		       NAPI_POLL_WEIGHT);
}
504
505 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
506 {
507         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
508
509         netif_napi_del(&block->napi);
510 }
511
512 static int gve_register_qpls(struct gve_priv *priv)
513 {
514         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
515         int err;
516         int i;
517
518         for (i = 0; i < num_qpls; i++) {
519                 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
520                 if (err) {
521                         netif_err(priv, drv, priv->dev,
522                                   "failed to register queue page list %d\n",
523                                   priv->qpls[i].id);
524                         /* This failure will trigger a reset - no need to clean
525                          * up
526                          */
527                         return err;
528                 }
529         }
530         return 0;
531 }
532
533 static int gve_unregister_qpls(struct gve_priv *priv)
534 {
535         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
536         int err;
537         int i;
538
539         for (i = 0; i < num_qpls; i++) {
540                 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
541                 /* This failure will trigger a reset - no need to clean up */
542                 if (err) {
543                         netif_err(priv, drv, priv->dev,
544                                   "Failed to unregister queue page list %d\n",
545                                   priv->qpls[i].id);
546                         return err;
547                 }
548         }
549         return 0;
550 }
551
/* Create tx then rx queues on the device via the admin queue, then
 * hand rx buffers to the NIC (doorbell for GQI, post-buffers for DQO).
 * Adminq failures trigger a reset, so errors are returned unwound.
 */
static int gve_create_rings(struct gve_priv *priv)
{
	int err;
	int i;

	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
			  priv->tx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
		  priv->tx_cfg.num_queues);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
			  priv->rx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
		  priv->rx_cfg.num_queues);

	if (gve_is_gqi(priv)) {
		/* Rx data ring has been prefilled with packet buffers at queue
		 * allocation time.
		 *
		 * Write the doorbell to provide descriptor slots and packet
		 * buffers to the NIC.
		 */
		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			gve_rx_write_doorbell(priv, &priv->rx[i]);
	} else {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			/* Post buffers and ring doorbell. */
			gve_rx_post_buffers_dqo(&priv->rx[i]);
		}
	}

	return 0;
}
599
600 static void add_napi_init_sync_stats(struct gve_priv *priv,
601                                      int (*napi_poll)(struct napi_struct *napi,
602                                                       int budget))
603 {
604         int i;
605
606         /* Add tx napi & init sync stats*/
607         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
608                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
609
610                 u64_stats_init(&priv->tx[i].statss);
611                 priv->tx[i].ntfy_id = ntfy_idx;
612                 gve_add_napi(priv, ntfy_idx, napi_poll);
613         }
614         /* Add rx napi  & init sync stats*/
615         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
616                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
617
618                 u64_stats_init(&priv->rx[i].statss);
619                 priv->rx[i].ntfy_id = ntfy_idx;
620                 gve_add_napi(priv, ntfy_idx, napi_poll);
621         }
622 }
623
/* Free tx rings via the path matching the queue format (braces dropped
 * to match kernel style and the sibling gve_rx_free_rings()).
 */
static void gve_tx_free_rings(struct gve_priv *priv)
{
	if (gve_is_gqi(priv))
		gve_tx_free_rings_gqi(priv);
	else
		gve_tx_free_rings_dqo(priv);
}
632
/* Allocate the tx and rx ring arrays and their format-specific ring
 * state, then attach NAPI contexts. On error, frees whatever was
 * allocated so far and returns a negative errno.
 */
static int gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	/* Setup tx rings */
	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
			    GFP_KERNEL);
	if (!priv->tx)
		return -ENOMEM;

	if (gve_is_gqi(priv))
		err = gve_tx_alloc_rings(priv);
	else
		err = gve_tx_alloc_rings_dqo(priv);
	if (err)
		goto free_tx;

	/* Setup rx rings */
	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
			    GFP_KERNEL);
	if (!priv->rx) {
		err = -ENOMEM;
		goto free_tx_queue;
	}

	if (gve_is_gqi(priv))
		err = gve_rx_alloc_rings(priv);
	else
		err = gve_rx_alloc_rings_dqo(priv);
	if (err)
		goto free_rx;

	if (gve_is_gqi(priv))
		add_napi_init_sync_stats(priv, gve_napi_poll);
	else
		add_napi_init_sync_stats(priv, gve_napi_poll_dqo);

	return 0;

free_rx:
	kvfree(priv->rx);
	priv->rx = NULL;
free_tx_queue:
	gve_tx_free_rings(priv);
free_tx:
	kvfree(priv->tx);
	priv->tx = NULL;
	return err;
}
682
/* Ask the device to destroy all tx then rx queues via the admin queue.
 * An adminq failure triggers a reset, so errors return immediately.
 */
static int gve_destroy_rings(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy tx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy rx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
	return 0;
}
705
/* Free rx rings via the path matching the queue format. */
static void gve_rx_free_rings(struct gve_priv *priv)
{
	if (gve_is_gqi(priv))
		gve_rx_free_rings_gqi(priv);
	else
		gve_rx_free_rings_dqo(priv);
}
713
/* Detach NAPI contexts and free all ring state and the ring arrays.
 * Safe to call when the arrays were never allocated.
 */
static void gve_free_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int i;

	if (priv->tx) {
		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_tx_free_rings(priv);
		kvfree(priv->tx);
		priv->tx = NULL;
	}
	if (priv->rx) {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_rx_free_rings(priv);
		kvfree(priv->rx);
		priv->rx = NULL;
	}
}
738
/* Allocate one page and DMA-map it for @dir.
 *
 * On success *page and *dma are valid and 0 is returned. On failure
 * the relevant error counter on @priv is bumped and -ENOMEM returned;
 * note *page/*dma may hold the failed values (callers pair this with
 * gve_free_page(), which tolerates them).
 */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}
756
/* Allocate and DMA-map @pages pages for queue page list @id.
 *
 * Enforces the device's registered-page budget first. Partial failures
 * leave qpl->num_entries reflecting what was mapped; the caller is
 * responsible for cleanup (see the comments below).
 */
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
				     int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
				   GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->page_buses)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i],
				     gve_qpl_dma_dir(priv, id));
		/* caller handles clean up */
		if (err)
			return -ENOMEM;
		/* num_entries counts successfully mapped pages only. */
		qpl->num_entries++;
	}
	priv->num_registered_pages += pages;

	return 0;
}
797
/* Unmap (if the mapping succeeded) and release one page. Tolerates the
 * partially-failed outputs of gve_alloc_page().
 */
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}
806
/* Free queue page list @id: unmap/release each mapped page, then the
 * bookkeeping arrays, and return the pages to the registration budget.
 * Handles the partially-allocated states gve_alloc_queue_page_list()
 * can leave behind.
 */
static void gve_free_queue_page_list(struct gve_priv *priv,
				     int id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	if (!qpl->pages)
		return;
	if (!qpl->page_buses)
		goto free_pages;

	/* Only num_entries pages were successfully mapped. */
	for (i = 0; i < qpl->num_entries; i++)
		gve_free_page(&priv->pdev->dev, qpl->pages[i],
			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

	kvfree(qpl->page_buses);
free_pages:
	kvfree(qpl->pages);
	priv->num_registered_pages -= qpl->num_entries;
}
827
828 static int gve_alloc_qpls(struct gve_priv *priv)
829 {
830         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
831         int i, j;
832         int err;
833
834         /* Raw addressing means no QPLs */
835         if (priv->queue_format == GVE_GQI_RDA_FORMAT)
836                 return 0;
837
838         priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
839         if (!priv->qpls)
840                 return -ENOMEM;
841
842         for (i = 0; i < gve_num_tx_qpls(priv); i++) {
843                 err = gve_alloc_queue_page_list(priv, i,
844                                                 priv->tx_pages_per_qpl);
845                 if (err)
846                         goto free_qpls;
847         }
848         for (; i < num_qpls; i++) {
849                 err = gve_alloc_queue_page_list(priv, i,
850                                                 priv->rx_data_slot_cnt);
851                 if (err)
852                         goto free_qpls;
853         }
854
855         priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
856                                      sizeof(unsigned long) * BITS_PER_BYTE;
857         priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
858                                             sizeof(unsigned long), GFP_KERNEL);
859         if (!priv->qpl_cfg.qpl_id_map) {
860                 err = -ENOMEM;
861                 goto free_qpls;
862         }
863
864         return 0;
865
866 free_qpls:
867         for (j = 0; j <= i; j++)
868                 gve_free_queue_page_list(priv, j);
869         kvfree(priv->qpls);
870         return err;
871 }
872
873 static void gve_free_qpls(struct gve_priv *priv)
874 {
875         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
876         int i;
877
878         /* Raw addressing means no QPLs */
879         if (priv->queue_format == GVE_GQI_RDA_FORMAT)
880                 return;
881
882         kvfree(priv->qpl_cfg.qpl_id_map);
883
884         for (i = 0; i < num_qpls; i++)
885                 gve_free_queue_page_list(priv, i);
886
887         kvfree(priv->qpls);
888 }
889
/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	/* Mark the reset as pending, then let the service task perform it
	 * (the work item checks the do-reset flag).
	 */
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}
899
900 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
901 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
902 static void gve_turndown(struct gve_priv *priv);
903 static void gve_turnup(struct gve_priv *priv);
904
/* .ndo_open: allocate QPLs and rings, register them with the device,
 * create the device-side rings, then enable napi/queues.
 *
 * Error handling is two-tier: failures before the device learns about any
 * resources unwind locally (free_rings/free_qpls); failures after
 * registration may leave the device holding state, so they go through the
 * reset path instead.
 */
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;

	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	/* From here on the device knows about our resources; failures must
	 * use the reset path rather than plain frees.
	 */
	err = gve_register_qpls(priv);
	if (err)
		goto reset;

	if (!gve_is_gqi(priv)) {
		/* Hard code this for now. This may be tuned in the future for
		 * performance.
		 */
		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
	}
	err = gve_create_rings(priv);
	if (err)
		goto reset;

	gve_set_device_rings_ok(priv);

	/* Restart periodic stats reporting if it was enabled */
	if (gve_get_report_stats(priv))
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));

	gve_turnup(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	priv->interface_up_cnt++;
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}
970
/* .ndo_stop: quiesce the device, destroy device-side rings, unregister
 * QPLs, then free host-side resources.
 *
 * If tearing down the device-side state fails, fall back to a full reset
 * (unless one is already in progress, in which case the caller owns
 * recovery).
 */
static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	/* Device rings only exist if a previous open got this far */
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}
	del_timer_sync(&priv->stats_report_timer);

	gve_free_rings(priv);
	gve_free_qpls(priv);
	priv->interface_down_cnt++;
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}
1004
1005 int gve_adjust_queues(struct gve_priv *priv,
1006                       struct gve_queue_config new_rx_config,
1007                       struct gve_queue_config new_tx_config)
1008 {
1009         int err;
1010
1011         if (netif_carrier_ok(priv->dev)) {
1012                 /* To make this process as simple as possible we teardown the
1013                  * device, set the new configuration, and then bring the device
1014                  * up again.
1015                  */
1016                 err = gve_close(priv->dev);
1017                 /* we have already tried to reset in close,
1018                  * just fail at this point
1019                  */
1020                 if (err)
1021                         return err;
1022                 priv->tx_cfg = new_tx_config;
1023                 priv->rx_cfg = new_rx_config;
1024
1025                 err = gve_open(priv->dev);
1026                 if (err)
1027                         goto err;
1028
1029                 return 0;
1030         }
1031         /* Set the config for the next up. */
1032         priv->tx_cfg = new_tx_config;
1033         priv->rx_cfg = new_rx_config;
1034
1035         return 0;
1036 err:
1037         netif_err(priv, drv, priv->dev,
1038                   "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1039         gve_turndown(priv);
1040         return err;
1041 }
1042
1043 static void gve_turndown(struct gve_priv *priv)
1044 {
1045         int idx;
1046
1047         if (netif_carrier_ok(priv->dev))
1048                 netif_carrier_off(priv->dev);
1049
1050         if (!gve_get_napi_enabled(priv))
1051                 return;
1052
1053         /* Disable napi to prevent more work from coming in */
1054         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1055                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1056                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1057
1058                 napi_disable(&block->napi);
1059         }
1060         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1061                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1062                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1063
1064                 napi_disable(&block->napi);
1065         }
1066
1067         /* Stop tx queues */
1068         netif_tx_disable(priv->dev);
1069
1070         gve_clear_napi_enabled(priv);
1071         gve_clear_report_stats(priv);
1072 }
1073
1074 static void gve_turnup(struct gve_priv *priv)
1075 {
1076         int idx;
1077
1078         /* Start the tx queues */
1079         netif_tx_start_all_queues(priv->dev);
1080
1081         /* Enable napi and unmask interrupts for all queues */
1082         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1083                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1084                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1085
1086                 napi_enable(&block->napi);
1087                 if (gve_is_gqi(priv)) {
1088                         iowrite32be(0, gve_irq_doorbell(priv, block));
1089                 } else {
1090                         u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
1091
1092                         gve_write_irq_doorbell_dqo(priv, block, val);
1093                 }
1094         }
1095         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1096                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1097                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1098
1099                 napi_enable(&block->napi);
1100                 if (gve_is_gqi(priv)) {
1101                         iowrite32be(0, gve_irq_doorbell(priv, block));
1102                 } else {
1103                         u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
1104
1105                         gve_write_irq_doorbell_dqo(priv, block, val);
1106                 }
1107         }
1108
1109         gve_set_napi_enabled(priv);
1110 }
1111
/* .ndo_tx_timeout: the stack detected a stuck TX queue; recover by
 * scheduling a full device reset on the service workqueue.
 */
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	/* Statistics counter for timeouts (reported via ethtool stats) */
	priv->tx_timeo_cnt++;
}
1119
1120 static int gve_set_features(struct net_device *netdev,
1121                             netdev_features_t features)
1122 {
1123         const netdev_features_t orig_features = netdev->features;
1124         struct gve_priv *priv = netdev_priv(netdev);
1125         int err;
1126
1127         if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1128                 netdev->features ^= NETIF_F_LRO;
1129                 if (netif_carrier_ok(netdev)) {
1130                         /* To make this process as simple as possible we
1131                          * teardown the device, set the new configuration,
1132                          * and then bring the device up again.
1133                          */
1134                         err = gve_close(netdev);
1135                         /* We have already tried to reset in close, just fail
1136                          * at this point.
1137                          */
1138                         if (err)
1139                                 goto err;
1140
1141                         err = gve_open(netdev);
1142                         if (err)
1143                                 goto err;
1144                 }
1145         }
1146
1147         return 0;
1148 err:
1149         /* Reverts the change on error. */
1150         netdev->features = orig_features;
1151         netif_err(priv, drv, netdev,
1152                   "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1153         return err;
1154 }
1155
/* net_device callbacks. Note gve_open/gve_close are also invoked
 * internally by the reset and queue-reconfiguration paths.
 */
static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		=	gve_start_xmit,
	.ndo_open		=	gve_open,
	.ndo_stop		=	gve_close,
	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout		=	gve_tx_timeout,
	.ndo_set_features	=	gve_set_features,
};
1164
1165 static void gve_handle_status(struct gve_priv *priv, u32 status)
1166 {
1167         if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1168                 dev_info(&priv->pdev->dev, "Device requested reset.\n");
1169                 gve_set_do_reset(priv);
1170         }
1171         if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1172                 priv->stats_report_trigger_cnt++;
1173                 gve_set_do_report_stats(priv);
1174         }
1175 }
1176
1177 static void gve_handle_reset(struct gve_priv *priv)
1178 {
1179         /* A service task will be scheduled at the end of probe to catch any
1180          * resets that need to happen, and we don't want to reset until
1181          * probe is done.
1182          */
1183         if (gve_get_probe_in_progress(priv))
1184                 return;
1185
1186         if (gve_get_do_reset(priv)) {
1187                 rtnl_lock();
1188                 gve_reset(priv, false);
1189                 rtnl_unlock();
1190         }
1191 }
1192
1193 void gve_handle_report_stats(struct gve_priv *priv)
1194 {
1195         int idx, stats_idx = 0, tx_bytes;
1196         unsigned int start = 0;
1197         struct stats *stats = priv->stats_report->stats;
1198
1199         if (!gve_get_report_stats(priv))
1200                 return;
1201
1202         be64_add_cpu(&priv->stats_report->written_count, 1);
1203         /* tx stats */
1204         if (priv->tx) {
1205                 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1206                         u32 last_completion = 0;
1207                         u32 tx_frames = 0;
1208
1209                         /* DQO doesn't currently support these metrics. */
1210                         if (gve_is_gqi(priv)) {
1211                                 last_completion = priv->tx[idx].done;
1212                                 tx_frames = priv->tx[idx].req;
1213                         }
1214
1215                         do {
1216                                 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
1217                                 tx_bytes = priv->tx[idx].bytes_done;
1218                         } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1219                         stats[stats_idx++] = (struct stats) {
1220                                 .stat_name = cpu_to_be32(TX_WAKE_CNT),
1221                                 .value = cpu_to_be64(priv->tx[idx].wake_queue),
1222                                 .queue_id = cpu_to_be32(idx),
1223                         };
1224                         stats[stats_idx++] = (struct stats) {
1225                                 .stat_name = cpu_to_be32(TX_STOP_CNT),
1226                                 .value = cpu_to_be64(priv->tx[idx].stop_queue),
1227                                 .queue_id = cpu_to_be32(idx),
1228                         };
1229                         stats[stats_idx++] = (struct stats) {
1230                                 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
1231                                 .value = cpu_to_be64(tx_frames),
1232                                 .queue_id = cpu_to_be32(idx),
1233                         };
1234                         stats[stats_idx++] = (struct stats) {
1235                                 .stat_name = cpu_to_be32(TX_BYTES_SENT),
1236                                 .value = cpu_to_be64(tx_bytes),
1237                                 .queue_id = cpu_to_be32(idx),
1238                         };
1239                         stats[stats_idx++] = (struct stats) {
1240                                 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1241                                 .value = cpu_to_be64(last_completion),
1242                                 .queue_id = cpu_to_be32(idx),
1243                         };
1244                 }
1245         }
1246         /* rx stats */
1247         if (priv->rx) {
1248                 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1249                         stats[stats_idx++] = (struct stats) {
1250                                 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1251                                 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
1252                                 .queue_id = cpu_to_be32(idx),
1253                         };
1254                         stats[stats_idx++] = (struct stats) {
1255                                 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1256                                 .value = cpu_to_be64(priv->rx[0].fill_cnt),
1257                                 .queue_id = cpu_to_be32(idx),
1258                         };
1259                 }
1260         }
1261 }
1262
1263 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1264 {
1265         if (!gve_get_napi_enabled(priv))
1266                 return;
1267
1268         if (link_status == netif_carrier_ok(priv->dev))
1269                 return;
1270
1271         if (link_status) {
1272                 netdev_info(priv->dev, "Device link is up.\n");
1273                 netif_carrier_on(priv->dev);
1274         } else {
1275                 netdev_info(priv->dev, "Device link is down.\n");
1276                 netif_carrier_off(priv->dev);
1277         }
1278 }
1279
/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);
	/* Snapshot the device status register once for all handlers below */
	u32 status = ioread32be(&priv->reg_bar0->device_status);

	gve_handle_status(priv, status);

	gve_handle_reset(priv);
	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
1292
/* Initialize driver-private state and device resources.
 *
 * Sets up the admin queue, then (unless @skip_describe_device, used on
 * reset recovery where the description is already known) queries the
 * device description, clamps MTU for GQI, sizes the MSI-X notify blocks,
 * and derives the TX/RX queue counts. Finally allocates device resources.
 *
 * Returns 0 on success; on failure the admin queue is freed before
 * returning the negative errno.
 */
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	/* GQI cannot exceed a page per packet buffer, so cap the MTU */
	if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	/* Half the notify blocks go to TX, half to RX */
	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}
1374
/* Release everything gve_init_priv() set up: device resources first,
 * then the admin queue itself.
 */
static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}
1380
/* Force the device into reset; releasing the admin queue is how the
 * driver signals a reset to the NIC.
 */
static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}
1386
/* Reset the device, close the interface if it was up, and tear down all
 * private resources. Pair with gve_reset_recovery() to bring it back.
 */
static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}
1395
1396 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1397 {
1398         int err;
1399
1400         err = gve_init_priv(priv, true);
1401         if (err)
1402                 goto err;
1403         if (was_up) {
1404                 err = gve_open(priv->dev);
1405                 if (err)
1406                         goto err;
1407         }
1408         return 0;
1409 err:
1410         dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1411         gve_turndown(priv);
1412         return err;
1413 }
1414
/* Perform a full device reset and recovery.
 *
 * Called with the rtnl lock held (see gve_handle_reset()). When
 * @attempt_teardown is false the device is assumed unable to service
 * requests, so it is quiesced and reset immediately; otherwise a normal
 * close is attempted first, falling back to the hard path if that fails.
 */
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	/* Reset the interface up/down accounting for the new epoch */
	priv->reset_cnt++;
	priv->interface_up_cnt = 0;
	priv->interface_down_cnt = 0;
	priv->stats_report_trigger_cnt = 0;
	return err;
}
1450
1451 static void gve_write_version(u8 __iomem *driver_version_register)
1452 {
1453         const char *c = gve_version_prefix;
1454
1455         while (*c) {
1456                 writeb(*c, driver_version_register);
1457                 c++;
1458         }
1459
1460         c = gve_version_str;
1461         while (*c) {
1462                 writeb(*c, driver_version_register);
1463                 c++;
1464         }
1465         writeb('\n', driver_version_register);
1466 }
1467
/* PCI probe: map BARs, allocate and configure the netdev/private state,
 * initialize the device, and register the interface.
 *
 * Unwinds in strict reverse order through the abort_* labels on failure.
 */
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int max_tx_queues, max_rx_queues;
	struct net_device *dev;
	__be32 __iomem *db_bar;
	struct gve_registers __iomem *reg_bar;
	struct gve_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "gvnic-cfg");
	if (err)
		goto abort_with_enabled;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
	if (!reg_bar) {
		dev_err(&pdev->dev, "Failed to map pci bar!\n");
		err = -ENOMEM;
		goto abort_with_pci_region;
	}

	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
	if (!db_bar) {
		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
		err = -ENOMEM;
		goto abort_with_reg_bar;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
	/* Alloc and setup the netdev and priv */
	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
	if (!dev) {
		dev_err(&pdev->dev, "could not allocate netdev\n");
		err = -ENOMEM;
		goto abort_with_db_bar;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->ethtool_ops = &gve_ethtool_ops;
	dev->netdev_ops = &gve_netdev_ops;

	/* Set default and supported features.
	 *
	 * Features might be set in other locations as well (such as
	 * `gve_adminq_describe_device`).
	 */
	dev->hw_features = NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_TSO;
	dev->hw_features |= NETIF_F_TSO6;
	dev->hw_features |= NETIF_F_TSO_ECN;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXHASH;
	dev->features = dev->hw_features;
	dev->watchdog_timeo = 5 * HZ;
	dev->min_mtu = ETH_MIN_MTU;
	netif_carrier_off(dev);

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->pdev = pdev;
	priv->msg_enable = DEFAULT_MSG_LEVEL;
	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->service_task_flags = 0x0;
	priv->state_flags = 0x0;
	priv->ethtool_flags = 0x0;

	/* Block the service task from resetting until probe completes */
	gve_set_probe_in_progress(priv);
	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
	if (!priv->gve_wq) {
		dev_err(&pdev->dev, "Could not allocate workqueue");
		err = -ENOMEM;
		goto abort_with_netdev;
	}
	INIT_WORK(&priv->service_task, gve_service_task);
	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
	priv->tx_cfg.max_queues = max_tx_queues;
	priv->rx_cfg.max_queues = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		goto abort_with_wq;

	err = register_netdev(dev);
	if (err)
		goto abort_with_gve_init;

	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
	gve_clear_probe_in_progress(priv);
	/* Kick the service task once to catch anything missed during probe */
	queue_work(priv->gve_wq, &priv->service_task);
	return 0;

abort_with_gve_init:
	gve_teardown_priv_resources(priv);

abort_with_wq:
	destroy_workqueue(priv->gve_wq);

abort_with_netdev:
	free_netdev(dev);

abort_with_db_bar:
	pci_iounmap(pdev, db_bar);

abort_with_reg_bar:
	pci_iounmap(pdev, reg_bar);

abort_with_pci_region:
	pci_release_regions(pdev);

abort_with_enabled:
	pci_disable_device(pdev);
	return err;
}
1599
/* PCI remove: undo gve_probe() in reverse order. */
static void gve_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct gve_priv *priv = netdev_priv(netdev);
	/* Snapshot the BAR pointers now: priv lives inside netdev and is
	 * gone after free_netdev() below.
	 */
	__be32 __iomem *db_bar = priv->db_bar2;
	void __iomem *reg_bar = priv->reg_bar0;

	unregister_netdev(netdev);
	gve_teardown_priv_resources(priv);
	destroy_workqueue(priv->gve_wq);
	free_netdev(netdev);
	pci_iounmap(pdev, db_bar);
	pci_iounmap(pdev, reg_bar);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1616
/* PCI IDs this driver binds to (Google gVNIC). */
static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
	{ }
};

static struct pci_driver gvnic_driver = {
	.name		= "gvnic",
	.id_table	= gve_id_table,
	.probe		= gve_probe,
	.remove		= gve_remove,
};
1628
1629 module_pci_driver(gvnic_driver);
1630
1631 MODULE_DEVICE_TABLE(pci, gve_id_table);
1632 MODULE_AUTHOR("Google, Inc.");
1633 MODULE_DESCRIPTION("gVNIC Driver");
1634 MODULE_LICENSE("Dual MIT/GPL");
1635 MODULE_VERSION(GVE_VERSION);