dev_ioctl: split out ndo_eth_ioctl
[linux-block.git] drivers/net/ethernet/intel/igc/igc_main.c
d89f8841
SN
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018 Intel Corporation */
3
4#include <linux/module.h>
5#include <linux/types.h>
c9a11c23
SN
6#include <linux/if_vlan.h>
7#include <linux/aer.h>
d3ae3cfb
SN
8#include <linux/tcp.h>
9#include <linux/udp.h>
10#include <linux/ip.h>
9513d2a5 11#include <linux/pm_runtime.h>
ec50a9d4 12#include <net/pkt_sched.h>
26575105 13#include <linux/bpf_trace.h>
fc9df2a0 14#include <net/xdp_sock_drv.h>
d3ae3cfb 15#include <net/ipv6.h>
d89f8841
SN
16
17#include "igc.h"
18#include "igc_hw.h"
ec50a9d4 19#include "igc_tsn.h"
26575105 20#include "igc_xdp.h"
d89f8841 21
d89f8841
SN
22#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
23
8c5ad0da
SN
24#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
25
26575105
AG
26#define IGC_XDP_PASS 0
27#define IGC_XDP_CONSUMED BIT(0)
73f1071c 28#define IGC_XDP_TX BIT(1)
4ff32036 29#define IGC_XDP_REDIRECT BIT(2)
26575105 30
c9a11c23
SN
31static int debug = -1;
32
d89f8841
SN
33MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
34MODULE_DESCRIPTION(DRV_SUMMARY);
35MODULE_LICENSE("GPL v2");
c9a11c23
SN
36module_param(debug, int, 0);
37MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
d89f8841
SN
38
39char igc_driver_name[] = "igc";
d89f8841
SN
40static const char igc_driver_string[] = DRV_SUMMARY;
41static const char igc_copyright[] =
42 "Copyright(c) 2018 Intel Corporation.";
43
ab405612
SN
44static const struct igc_info *igc_info_tbl[] = {
45 [board_base] = &igc_base_info,
46};
47
d89f8841 48static const struct pci_device_id igc_pci_tbl[] = {
ab405612
SN
49 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
6d37a382
SN
51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
c2a3f8fe 54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
bfa5e98c 55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
c2a3f8fe
SN
56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
43546211
SN
58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
0e7d4b93 63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
d89f8841
SN
64 /* required last entry */
65 {0, }
66};
67
68MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
69
3df25e4c
SN
70enum latency_range {
71 lowest_latency = 0,
72 low_latency = 1,
73 bulk_latency = 2,
74 latency_invalid = 255
75};
c9a11c23 76
8c5ad0da 77void igc_reset(struct igc_adapter *adapter)
c9a11c23 78{
25f06eff 79 struct net_device *dev = adapter->netdev;
c0071c7a 80 struct igc_hw *hw = &adapter->hw;
0373ad4d
SN
81 struct igc_fc_info *fc = &hw->fc;
82 u32 pba, hwm;
83
84 /* Repartition PBA for greater than 9k MTU if required */
85 pba = IGC_PBA_34K;
86
87 /* flow control settings
88 * The high water mark must be low enough to fit one full frame
89 * after transmitting the pause frame. As such we must have enough
90 * space to allow for us to complete our current transmit and then
91 * receive the frame that is in progress from the link partner.
92 * Set it to:
93 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
94 */
95 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
96
97 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
98 fc->low_water = fc->high_water - 16;
99 fc->pause_time = 0xFFFF;
100 fc->send_xon = 1;
101 fc->current_mode = fc->requested_mode;
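 /* Illustrative sketch (not part of the driver; constants assumed):
  * taking IGC_PBA_34K as a 34 KiB packet buffer, MAX_JUMBO_FRAME_SIZE
  * as roughly 9728 bytes and a 1500-byte MTU (max_frame_size ~= 1522),
  * the math above works out to:
  *   hwm        = 34 * 1024 - (1522 + 9728) = 23566
  *   high_water = 23566 & 0xFFFFFFF0        = 23552
  *   low_water  = 23552 - 16                = 23536
  * i.e. XOFF is signalled once the Rx FIFO fills past ~23.5 KiB,
  * leaving room for one in-flight Tx frame plus one jumbo Rx frame.
  */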
c0071c7a
SN
102
103 hw->mac.ops.reset_hw(hw);
104
105 if (hw->mac.ops.init_hw(hw))
25f06eff 106 netdev_err(dev, "Error on hardware initialization\n");
c0071c7a 107
93ec439a
SN
108 /* Re-establish EEE setting */
109 igc_set_eee_i225(hw, true, true, true);
110
c9a11c23 111 if (!netif_running(adapter->netdev))
a0beb3c1 112 igc_power_down_phy_copper_base(&adapter->hw);
5586838f 113
8d744963
MHZ
114 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
115 wr32(IGC_VET, ETH_P_8021Q);
116
5f295805
VCG
117 /* Re-enable PTP, where applicable. */
118 igc_ptp_reset(adapter);
119
ec50a9d4
VCG
120 /* Re-enable TSN offloading, where applicable. */
121 igc_tsn_offload_apply(adapter);
122
5586838f 123 igc_get_phy_info(hw);
c9a11c23
SN
124}
125
126/**
684ea87c 127 * igc_power_up_link - Power up the phy link
c9a11c23
SN
128 * @adapter: address of board private structure
129 */
130static void igc_power_up_link(struct igc_adapter *adapter)
131{
5586838f
SN
132 igc_reset_phy(&adapter->hw);
133
2b374e37 134 igc_power_up_phy_copper(&adapter->hw);
5586838f
SN
135
136 igc_setup_link(&adapter->hw);
c9a11c23
SN
137}
138
c9a11c23
SN
139/**
140 * igc_release_hw_control - release control of the h/w to f/w
141 * @adapter: address of board private structure
142 *
143 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
144 * For ASF and Pass Through versions of f/w this means that the
145 * driver is no longer loaded.
146 */
147static void igc_release_hw_control(struct igc_adapter *adapter)
148{
149 struct igc_hw *hw = &adapter->hw;
150 u32 ctrl_ext;
151
152 /* Let firmware take over control of h/w */
153 ctrl_ext = rd32(IGC_CTRL_EXT);
154 wr32(IGC_CTRL_EXT,
155 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
156}
157
158/**
159 * igc_get_hw_control - get control of the h/w from f/w
160 * @adapter: address of board private structure
161 *
162 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
163 * For ASF and Pass Through versions of f/w this means that
164 * the driver is loaded.
165 */
166static void igc_get_hw_control(struct igc_adapter *adapter)
167{
168 struct igc_hw *hw = &adapter->hw;
169 u32 ctrl_ext;
170
171 /* Let firmware know the driver has taken over */
172 ctrl_ext = rd32(IGC_CTRL_EXT);
173 wr32(IGC_CTRL_EXT,
174 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
175}
176
61234295
AG
177static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
178{
179 dma_unmap_single(dev, dma_unmap_addr(buf, dma),
180 dma_unmap_len(buf, len), DMA_TO_DEVICE);
181
182 dma_unmap_len_set(buf, len, 0);
183}
184
13b5b7fd
SN
185/**
186 * igc_clean_tx_ring - Free Tx Buffers
187 * @tx_ring: ring to be cleaned
188 */
189static void igc_clean_tx_ring(struct igc_ring *tx_ring)
190{
191 u16 i = tx_ring->next_to_clean;
192 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
9acf59a7 193 u32 xsk_frames = 0;
13b5b7fd
SN
194
195 while (i != tx_ring->next_to_use) {
196 union igc_adv_tx_desc *eop_desc, *tx_desc;
197
859b4dfa 198 switch (tx_buffer->type) {
9acf59a7
AG
199 case IGC_TX_BUFFER_TYPE_XSK:
200 xsk_frames++;
201 break;
859b4dfa 202 case IGC_TX_BUFFER_TYPE_XDP:
73f1071c 203 xdp_return_frame(tx_buffer->xdpf);
9acf59a7 204 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
859b4dfa
AG
205 break;
206 case IGC_TX_BUFFER_TYPE_SKB:
73f1071c 207 dev_kfree_skb_any(tx_buffer->skb);
9acf59a7 208 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
859b4dfa
AG
209 break;
210 default:
211 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
212 break;
213 }
13b5b7fd 214
13b5b7fd
SN
215 /* check for eop_desc to determine the end of the packet */
216 eop_desc = tx_buffer->next_to_watch;
217 tx_desc = IGC_TX_DESC(tx_ring, i);
218
219 /* unmap remaining buffers */
220 while (tx_desc != eop_desc) {
221 tx_buffer++;
222 tx_desc++;
223 i++;
224 if (unlikely(i == tx_ring->count)) {
225 i = 0;
226 tx_buffer = tx_ring->tx_buffer_info;
227 tx_desc = IGC_TX_DESC(tx_ring, 0);
228 }
229
230 /* unmap any remaining paged data */
231 if (dma_unmap_len(tx_buffer, len))
61234295 232 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
13b5b7fd
SN
233 }
234
56ea7ed1
VCG
235 tx_buffer->next_to_watch = NULL;
236
13b5b7fd
SN
237 /* move us one more past the eop_desc for start of next pkt */
238 tx_buffer++;
239 i++;
240 if (unlikely(i == tx_ring->count)) {
241 i = 0;
242 tx_buffer = tx_ring->tx_buffer_info;
243 }
244 }
245
9acf59a7
AG
246 if (tx_ring->xsk_pool && xsk_frames)
247 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
248
13b5b7fd
SN
249 /* reset BQL for queue */
250 netdev_tx_reset_queue(txring_txq(tx_ring));
251
252 /* reset next_to_use and next_to_clean */
253 tx_ring->next_to_use = 0;
254 tx_ring->next_to_clean = 0;
255}
256
14504ac5
SN
257/**
258 * igc_free_tx_resources - Free Tx Resources per Queue
259 * @tx_ring: Tx descriptor ring for a specific queue
260 *
261 * Free all transmit software resources
262 */
263void igc_free_tx_resources(struct igc_ring *tx_ring)
264{
265 igc_clean_tx_ring(tx_ring);
266
267 vfree(tx_ring->tx_buffer_info);
268 tx_ring->tx_buffer_info = NULL;
269
270 /* if not set, then don't free */
271 if (!tx_ring->desc)
272 return;
273
274 dma_free_coherent(tx_ring->dev, tx_ring->size,
275 tx_ring->desc, tx_ring->dma);
276
277 tx_ring->desc = NULL;
278}
279
280/**
281 * igc_free_all_tx_resources - Free Tx Resources for All Queues
282 * @adapter: board private structure
283 *
284 * Free all transmit software resources
285 */
286static void igc_free_all_tx_resources(struct igc_adapter *adapter)
287{
288 int i;
289
290 for (i = 0; i < adapter->num_tx_queues; i++)
291 igc_free_tx_resources(adapter->tx_ring[i]);
292}
293
0507ef8a
SN
294/**
295 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
296 * @adapter: board private structure
297 */
298static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
299{
300 int i;
301
302 for (i = 0; i < adapter->num_tx_queues; i++)
303 if (adapter->tx_ring[i])
304 igc_clean_tx_ring(adapter->tx_ring[i]);
305}
306
13b5b7fd
SN
307/**
308 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
309 * @tx_ring: tx descriptor ring (for a specific queue) to setup
310 *
311 * Return 0 on success, negative on failure
312 */
8c5ad0da 313int igc_setup_tx_resources(struct igc_ring *tx_ring)
13b5b7fd 314{
25f06eff 315 struct net_device *ndev = tx_ring->netdev;
13b5b7fd
SN
316 struct device *dev = tx_ring->dev;
317 int size = 0;
318
319 size = sizeof(struct igc_tx_buffer) * tx_ring->count;
320 tx_ring->tx_buffer_info = vzalloc(size);
321 if (!tx_ring->tx_buffer_info)
322 goto err;
323
324 /* round up to nearest 4K */
325 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
326 tx_ring->size = ALIGN(tx_ring->size, 4096);
327
328 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
329 &tx_ring->dma, GFP_KERNEL);
330
331 if (!tx_ring->desc)
332 goto err;
333
334 tx_ring->next_to_use = 0;
335 tx_ring->next_to_clean = 0;
336
337 return 0;
338
339err:
340 vfree(tx_ring->tx_buffer_info);
25f06eff 341 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
13b5b7fd
SN
342 return -ENOMEM;
343}
344
345/**
346 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
347 * @adapter: board private structure
348 *
349 * Return 0 on success, negative on failure
350 */
351static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
352{
25f06eff 353 struct net_device *dev = adapter->netdev;
13b5b7fd
SN
354 int i, err = 0;
355
356 for (i = 0; i < adapter->num_tx_queues; i++) {
357 err = igc_setup_tx_resources(adapter->tx_ring[i]);
358 if (err) {
25f06eff 359 netdev_err(dev, "Error on Tx queue %u setup\n", i);
13b5b7fd
SN
360 for (i--; i >= 0; i--)
361 igc_free_tx_resources(adapter->tx_ring[i]);
362 break;
363 }
364 }
365
366 return err;
367}
368
f4851648 369static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
13b5b7fd
SN
370{
371 u16 i = rx_ring->next_to_clean;
372
399e06a5 373 dev_kfree_skb(rx_ring->skb);
13b5b7fd
SN
374 rx_ring->skb = NULL;
375
376 /* Free all the Rx ring sk_buffs */
377 while (i != rx_ring->next_to_alloc) {
378 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
379
380 /* Invalidate cache lines that may have been written to by
381 * device so that we avoid corrupting memory.
382 */
383 dma_sync_single_range_for_cpu(rx_ring->dev,
384 buffer_info->dma,
385 buffer_info->page_offset,
386 igc_rx_bufsz(rx_ring),
387 DMA_FROM_DEVICE);
388
389 /* free resources associated with mapping */
390 dma_unmap_page_attrs(rx_ring->dev,
391 buffer_info->dma,
392 igc_rx_pg_size(rx_ring),
393 DMA_FROM_DEVICE,
394 IGC_RX_DMA_ATTR);
395 __page_frag_cache_drain(buffer_info->page,
396 buffer_info->pagecnt_bias);
397
398 i++;
399 if (i == rx_ring->count)
400 i = 0;
401 }
f4851648
AG
402}
403
fc9df2a0
AG
404static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
405{
406 struct igc_rx_buffer *bi;
407 u16 i;
408
409 for (i = 0; i < ring->count; i++) {
410 bi = &ring->rx_buffer_info[i];
411 if (!bi->xdp)
412 continue;
413
414 xsk_buff_free(bi->xdp);
415 bi->xdp = NULL;
416 }
417}
418
f4851648
AG
419/**
420 * igc_clean_rx_ring - Free Rx Buffers per Queue
421 * @ring: ring to free buffers from
422 */
423static void igc_clean_rx_ring(struct igc_ring *ring)
424{
fc9df2a0
AG
425 if (ring->xsk_pool)
426 igc_clean_rx_ring_xsk_pool(ring);
427 else
428 igc_clean_rx_ring_page_shared(ring);
13b5b7fd 429
f4851648 430 clear_ring_uses_large_buffer(ring);
26575105 431
f4851648
AG
432 ring->next_to_alloc = 0;
433 ring->next_to_clean = 0;
434 ring->next_to_use = 0;
13b5b7fd
SN
435}
436
0507ef8a
SN
437/**
438 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
439 * @adapter: board private structure
440 */
441static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
442{
443 int i;
444
445 for (i = 0; i < adapter->num_rx_queues; i++)
446 if (adapter->rx_ring[i])
447 igc_clean_rx_ring(adapter->rx_ring[i]);
448}
449
13b5b7fd
SN
450/**
451 * igc_free_rx_resources - Free Rx Resources
452 * @rx_ring: ring to clean the resources from
453 *
454 * Free all receive software resources
455 */
8c5ad0da 456void igc_free_rx_resources(struct igc_ring *rx_ring)
13b5b7fd
SN
457{
458 igc_clean_rx_ring(rx_ring);
459
4609ffb9 460 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
73f1071c 461
13b5b7fd
SN
462 vfree(rx_ring->rx_buffer_info);
463 rx_ring->rx_buffer_info = NULL;
464
465 /* if not set, then don't free */
466 if (!rx_ring->desc)
467 return;
468
469 dma_free_coherent(rx_ring->dev, rx_ring->size,
470 rx_ring->desc, rx_ring->dma);
471
472 rx_ring->desc = NULL;
473}
474
475/**
476 * igc_free_all_rx_resources - Free Rx Resources for All Queues
477 * @adapter: board private structure
478 *
479 * Free all receive software resources
480 */
481static void igc_free_all_rx_resources(struct igc_adapter *adapter)
482{
483 int i;
484
485 for (i = 0; i < adapter->num_rx_queues; i++)
486 igc_free_rx_resources(adapter->rx_ring[i]);
487}
488
489/**
490 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
491 * @rx_ring: rx descriptor ring (for a specific queue) to setup
492 *
493 * Returns 0 on success, negative on failure
494 */
8c5ad0da 495int igc_setup_rx_resources(struct igc_ring *rx_ring)
13b5b7fd 496{
25f06eff 497 struct net_device *ndev = rx_ring->netdev;
13b5b7fd 498 struct device *dev = rx_ring->dev;
4609ffb9 499 u8 index = rx_ring->queue_index;
73f1071c
AG
500 int size, desc_len, res;
501
4609ffb9
AG
502 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
503 rx_ring->q_vector->napi.napi_id);
504 if (res < 0) {
505 netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
506 index);
73f1071c 507 return res;
4609ffb9 508 }
13b5b7fd
SN
509
510 size = sizeof(struct igc_rx_buffer) * rx_ring->count;
511 rx_ring->rx_buffer_info = vzalloc(size);
512 if (!rx_ring->rx_buffer_info)
513 goto err;
514
515 desc_len = sizeof(union igc_adv_rx_desc);
516
517 /* Round up to nearest 4K */
518 rx_ring->size = rx_ring->count * desc_len;
519 rx_ring->size = ALIGN(rx_ring->size, 4096);
520
521 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
522 &rx_ring->dma, GFP_KERNEL);
523
524 if (!rx_ring->desc)
525 goto err;
526
527 rx_ring->next_to_alloc = 0;
528 rx_ring->next_to_clean = 0;
529 rx_ring->next_to_use = 0;
530
531 return 0;
532
533err:
4609ffb9 534 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
13b5b7fd
SN
535 vfree(rx_ring->rx_buffer_info);
536 rx_ring->rx_buffer_info = NULL;
25f06eff 537 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
13b5b7fd
SN
538 return -ENOMEM;
539}
540
541/**
542 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
543 * (Descriptors) for all queues
544 * @adapter: board private structure
545 *
546 * Return 0 on success, negative on failure
547 */
548static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
549{
25f06eff 550 struct net_device *dev = adapter->netdev;
13b5b7fd
SN
551 int i, err = 0;
552
553 for (i = 0; i < adapter->num_rx_queues; i++) {
554 err = igc_setup_rx_resources(adapter->rx_ring[i]);
555 if (err) {
25f06eff 556 netdev_err(dev, "Error on Rx queue %u setup\n", i);
13b5b7fd
SN
557 for (i--; i >= 0; i--)
558 igc_free_rx_resources(adapter->rx_ring[i]);
559 break;
560 }
561 }
562
563 return err;
564}
565
fc9df2a0
AG
566static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
567 struct igc_ring *ring)
568{
569 if (!igc_xdp_is_enabled(adapter) ||
570 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
571 return NULL;
572
573 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
574}
575
13b5b7fd
SN
576/**
577 * igc_configure_rx_ring - Configure a receive ring after Reset
578 * @adapter: board private structure
579 * @ring: receive ring to be configured
580 *
581 * Configure the Rx unit of the MAC after a reset.
582 */
583static void igc_configure_rx_ring(struct igc_adapter *adapter,
584 struct igc_ring *ring)
585{
586 struct igc_hw *hw = &adapter->hw;
587 union igc_adv_rx_desc *rx_desc;
588 int reg_idx = ring->reg_idx;
589 u32 srrctl = 0, rxdctl = 0;
590 u64 rdba = ring->dma;
fc9df2a0
AG
591 u32 buf_size;
592
593 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
594 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
595 if (ring->xsk_pool) {
596 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
597 MEM_TYPE_XSK_BUFF_POOL,
598 NULL));
599 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
600 } else {
601 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
602 MEM_TYPE_PAGE_SHARED,
603 NULL));
604 }
4609ffb9 605
26575105
AG
606 if (igc_xdp_is_enabled(adapter))
607 set_ring_uses_large_buffer(ring);
608
13b5b7fd
SN
609 /* disable the queue */
610 wr32(IGC_RXDCTL(reg_idx), 0);
611
612 /* Set DMA base address registers */
613 wr32(IGC_RDBAL(reg_idx),
614 rdba & 0x00000000ffffffffULL);
615 wr32(IGC_RDBAH(reg_idx), rdba >> 32);
616 wr32(IGC_RDLEN(reg_idx),
617 ring->count * sizeof(union igc_adv_rx_desc));
618
619 /* initialize head and tail */
620 ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
621 wr32(IGC_RDH(reg_idx), 0);
622 writel(0, ring->tail);
623
 624 /* reset next-to-use/clean to place SW in sync with hardware */
625 ring->next_to_clean = 0;
626 ring->next_to_use = 0;
627
fc9df2a0
AG
628 if (ring->xsk_pool)
629 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
630 else if (ring_uses_large_buffer(ring))
631 buf_size = IGC_RXBUFFER_3072;
13b5b7fd 632 else
fc9df2a0
AG
633 buf_size = IGC_RXBUFFER_2048;
634
635 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
636 srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
13b5b7fd
SN
637 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
638
639 wr32(IGC_SRRCTL(reg_idx), srrctl);
640
641 rxdctl |= IGC_RX_PTHRESH;
642 rxdctl |= IGC_RX_HTHRESH << 8;
643 rxdctl |= IGC_RX_WTHRESH << 16;
644
645 /* initialize rx_buffer_info */
646 memset(ring->rx_buffer_info, 0,
647 sizeof(struct igc_rx_buffer) * ring->count);
648
649 /* initialize Rx descriptor 0 */
650 rx_desc = IGC_RX_DESC(ring, 0);
651 rx_desc->wb.upper.length = 0;
652
653 /* enable receive descriptor fetching */
654 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
655
656 wr32(IGC_RXDCTL(reg_idx), rxdctl);
657}
658
659/**
660 * igc_configure_rx - Configure receive Unit after Reset
661 * @adapter: board private structure
662 *
663 * Configure the Rx unit of the MAC after a reset.
664 */
665static void igc_configure_rx(struct igc_adapter *adapter)
666{
667 int i;
668
669 /* Setup the HW Rx Head and Tail Descriptor Pointers and
670 * the Base and Length of the Rx Descriptor Ring
671 */
672 for (i = 0; i < adapter->num_rx_queues; i++)
673 igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
674}
675
676/**
677 * igc_configure_tx_ring - Configure transmit ring after Reset
678 * @adapter: board private structure
679 * @ring: tx ring to configure
680 *
681 * Configure a transmit ring after a reset.
682 */
683static void igc_configure_tx_ring(struct igc_adapter *adapter,
684 struct igc_ring *ring)
685{
686 struct igc_hw *hw = &adapter->hw;
687 int reg_idx = ring->reg_idx;
688 u64 tdba = ring->dma;
689 u32 txdctl = 0;
690
9acf59a7
AG
691 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
692
13b5b7fd
SN
693 /* disable the queue */
694 wr32(IGC_TXDCTL(reg_idx), 0);
695 wrfl();
696 mdelay(10);
697
698 wr32(IGC_TDLEN(reg_idx),
699 ring->count * sizeof(union igc_adv_tx_desc));
700 wr32(IGC_TDBAL(reg_idx),
701 tdba & 0x00000000ffffffffULL);
702 wr32(IGC_TDBAH(reg_idx), tdba >> 32);
703
704 ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
705 wr32(IGC_TDH(reg_idx), 0);
706 writel(0, ring->tail);
707
708 txdctl |= IGC_TX_PTHRESH;
709 txdctl |= IGC_TX_HTHRESH << 8;
710 txdctl |= IGC_TX_WTHRESH << 16;
711
712 txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
713 wr32(IGC_TXDCTL(reg_idx), txdctl);
714}
715
716/**
717 * igc_configure_tx - Configure transmit Unit after Reset
718 * @adapter: board private structure
719 *
720 * Configure the Tx unit of the MAC after a reset.
721 */
722static void igc_configure_tx(struct igc_adapter *adapter)
723{
724 int i;
725
726 for (i = 0; i < adapter->num_tx_queues; i++)
727 igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
728}
729
730/**
731 * igc_setup_mrqc - configure the multiple receive queue control registers
732 * @adapter: Board private structure
733 */
734static void igc_setup_mrqc(struct igc_adapter *adapter)
735{
2121c271
SN
736 struct igc_hw *hw = &adapter->hw;
737 u32 j, num_rx_queues;
738 u32 mrqc, rxcsum;
739 u32 rss_key[10];
740
741 netdev_rss_key_fill(rss_key, sizeof(rss_key));
742 for (j = 0; j < 10; j++)
743 wr32(IGC_RSSRK(j), rss_key[j]);
744
745 num_rx_queues = adapter->rss_queues;
746
747 if (adapter->rss_indir_tbl_init != num_rx_queues) {
748 for (j = 0; j < IGC_RETA_SIZE; j++)
749 adapter->rss_indir_tbl[j] =
750 (j * num_rx_queues) / IGC_RETA_SIZE;
751 adapter->rss_indir_tbl_init = num_rx_queues;
752 }
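 /* Example (values assumed, not taken from the driver): with
  * rss_queues = 4 and IGC_RETA_SIZE = 128, entry j maps to queue
  * (j * 4) / 128 = j / 32, so entries 0-31 hit queue 0, 32-63 queue 1,
  * 64-95 queue 2 and 96-127 queue 3 - an even split of the
  * indirection table across the enabled Rx queues.
  */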
753 igc_write_rss_indir_tbl(adapter);
754
755 /* Disable raw packet checksumming so that RSS hash is placed in
756 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
757 * offloads as they are enabled by default
758 */
759 rxcsum = rd32(IGC_RXCSUM);
760 rxcsum |= IGC_RXCSUM_PCSD;
761
762 /* Enable Receive Checksum Offload for SCTP */
763 rxcsum |= IGC_RXCSUM_CRCOFL;
764
765 /* Don't need to set TUOFL or IPOFL, they default to 1 */
766 wr32(IGC_RXCSUM, rxcsum);
767
768 /* Generate RSS hash based on packet types, TCP/UDP
769 * port numbers and/or IPv4/v6 src and dst addresses
770 */
771 mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
772 IGC_MRQC_RSS_FIELD_IPV4_TCP |
773 IGC_MRQC_RSS_FIELD_IPV6 |
774 IGC_MRQC_RSS_FIELD_IPV6_TCP |
775 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
776
777 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
778 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
779 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
780 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
781
782 mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
783
784 wr32(IGC_MRQC, mrqc);
13b5b7fd
SN
785}
786
787/**
788 * igc_setup_rctl - configure the receive control registers
789 * @adapter: Board private structure
790 */
791static void igc_setup_rctl(struct igc_adapter *adapter)
792{
793 struct igc_hw *hw = &adapter->hw;
794 u32 rctl;
795
796 rctl = rd32(IGC_RCTL);
797
798 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
799 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
800
801 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
802 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
803
804 /* enable stripping of CRC. Newer features require
805 * that the HW strips the CRC.
806 */
807 rctl |= IGC_RCTL_SECRC;
808
809 /* disable store bad packets and clear size bits. */
810 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
811
812 /* enable LPE to allow for reception of jumbo frames */
813 rctl |= IGC_RCTL_LPE;
814
815 /* disable queue 0 to prevent tail write w/o re-config */
816 wr32(IGC_RXDCTL(0), 0);
817
818 /* This is useful for sniffing bad packets. */
819 if (adapter->netdev->features & NETIF_F_RXALL) {
820 /* UPE and MPE will be handled by normal PROMISC logic
821 * in set_rx_mode
822 */
823 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
824 IGC_RCTL_BAM | /* RX All Bcast Pkts */
825 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
826
827 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
828 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
829 }
830
831 wr32(IGC_RCTL, rctl);
832}
833
834/**
835 * igc_setup_tctl - configure the transmit control registers
836 * @adapter: Board private structure
837 */
838static void igc_setup_tctl(struct igc_adapter *adapter)
839{
840 struct igc_hw *hw = &adapter->hw;
841 u32 tctl;
842
 843 /* disable queue 0, which could be enabled by default */
844 wr32(IGC_TXDCTL(0), 0);
845
846 /* Program the Transmit Control Register */
847 tctl = rd32(IGC_TCTL);
848 tctl &= ~IGC_TCTL_CT;
849 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
850 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
851
852 /* Enable transmits */
853 tctl |= IGC_TCTL_EN;
854
855 wr32(IGC_TCTL, tctl);
856}
857
3988d8bf 858/**
424045be
AG
859 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
860 * @adapter: Pointer to adapter where the filter should be set
861 * @index: Filter index
750433d0
AG
862 * @type: MAC address filter type (source or destination)
863 * @addr: MAC address
424045be
AG
864 * @queue: If non-negative, queue assignment feature is enabled and frames
865 * matching the filter are enqueued onto 'queue'. Otherwise, queue
866 * assignment is disabled.
3988d8bf 867 */
424045be 868static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
750433d0 869 enum igc_mac_filter_type type,
424045be 870 const u8 *addr, int queue)
3988d8bf 871{
949b922e 872 struct net_device *dev = adapter->netdev;
3988d8bf 873 struct igc_hw *hw = &adapter->hw;
424045be 874 u32 ral, rah;
3988d8bf 875
424045be
AG
876 if (WARN_ON(index >= hw->mac.rar_entry_count))
877 return;
3988d8bf 878
424045be
AG
879 ral = le32_to_cpup((__le32 *)(addr));
880 rah = le16_to_cpup((__le16 *)(addr + 4));
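 /* Illustrative example: for the address 00:11:22:33:44:55 stored
  * byte-wise in 'addr', ral = 0x33221100 (first four octets read
  * little-endian) and rah holds 0x5544 in its low 16 bits; the
  * remaining RAH bits carry the ASEL/QSEL/AV control fields set below.
  */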
3988d8bf 881
750433d0
AG
882 if (type == IGC_MAC_FILTER_TYPE_SRC) {
883 rah &= ~IGC_RAH_ASEL_MASK;
884 rah |= IGC_RAH_ASEL_SRC_ADDR;
3988d8bf
SN
885 }
886
424045be
AG
887 if (queue >= 0) {
888 rah &= ~IGC_RAH_QSEL_MASK;
889 rah |= (queue << IGC_RAH_QSEL_SHIFT);
890 rah |= IGC_RAH_QSEL_ENABLE;
27945ebe
AG
891 }
892
424045be 893 rah |= IGC_RAH_AV;
3988d8bf 894
424045be
AG
895 wr32(IGC_RAL(index), ral);
896 wr32(IGC_RAH(index), rah);
949b922e
AG
897
898 netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
424045be
AG
899}
900
901/**
902 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
903 * @adapter: Pointer to adapter where the filter should be cleared
904 * @index: Filter index
905 */
906static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
907{
949b922e 908 struct net_device *dev = adapter->netdev;
424045be
AG
909 struct igc_hw *hw = &adapter->hw;
910
911 if (WARN_ON(index >= hw->mac.rar_entry_count))
912 return;
913
914 wr32(IGC_RAL(index), 0);
915 wr32(IGC_RAH(index), 0);
949b922e
AG
916
917 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
3988d8bf
SN
918}
919
920/* Set default MAC address for the PF in the first RAR entry */
921static void igc_set_default_mac_filter(struct igc_adapter *adapter)
922{
949b922e
AG
923 struct net_device *dev = adapter->netdev;
924 u8 *addr = adapter->hw.mac.addr;
3988d8bf 925
949b922e 926 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
3988d8bf 927
750433d0 928 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3988d8bf
SN
929}
930
c9a11c23
SN
931/**
932 * igc_set_mac - Change the Ethernet Address of the NIC
933 * @netdev: network interface device structure
934 * @p: pointer to an address structure
935 *
936 * Returns 0 on success, negative on failure
937 */
938static int igc_set_mac(struct net_device *netdev, void *p)
939{
940 struct igc_adapter *adapter = netdev_priv(netdev);
941 struct igc_hw *hw = &adapter->hw;
942 struct sockaddr *addr = p;
943
944 if (!is_valid_ether_addr(addr->sa_data))
945 return -EADDRNOTAVAIL;
946
947 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
948 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
949
950 /* set the correct pool for the new PF MAC address in entry 0 */
951 igc_set_default_mac_filter(adapter);
952
953 return 0;
954}
955
7f839684
SN
956/**
957 * igc_write_mc_addr_list - write multicast addresses to MTA
958 * @netdev: network interface device structure
959 *
960 * Writes multicast address list to the MTA hash table.
961 * Returns: -ENOMEM on failure
962 * 0 on no addresses written
963 * X on writing X addresses to MTA
964 **/
965static int igc_write_mc_addr_list(struct net_device *netdev)
966{
967 struct igc_adapter *adapter = netdev_priv(netdev);
968 struct igc_hw *hw = &adapter->hw;
969 struct netdev_hw_addr *ha;
970 u8 *mta_list;
971 int i;
972
973 if (netdev_mc_empty(netdev)) {
974 /* nothing to program, so clear mc list */
975 igc_update_mc_addr_list(hw, NULL, 0);
976 return 0;
977 }
978
979 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
980 if (!mta_list)
981 return -ENOMEM;
982
983 /* The shared function expects a packed array of only addresses. */
984 i = 0;
985 netdev_for_each_mc_addr(ha, netdev)
986 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
987
988 igc_update_mc_addr_list(hw, mta_list, i);
989 kfree(mta_list);
990
991 return netdev_mc_count(netdev);
992}
993
82faa9b7
VCG
994static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
995{
996 ktime_t cycle_time = adapter->cycle_time;
997 ktime_t base_time = adapter->base_time;
998 u32 launchtime;
999
1000 /* FIXME: when using ETF together with taprio, we may have a
1001 * case where 'delta' is larger than the cycle_time, this may
1002 * cause problems if we don't read the current value of
 1003 * IGC_BASET, as the value written into the launchtime
1004 * descriptor field may be misinterpreted.
1005 */
1006 div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
1007
1008 return cpu_to_le32(launchtime);
1009}
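/* Worked example (numbers assumed): with base_time = 0 and
 * cycle_time = 1 ms, a requested txtime of 2.3 ms gives
 * div_s64_rem(2300000, 1000000) -> remainder 300000, so the
 * descriptor's launch time is the offset of txtime within the current
 * Qbv cycle rather than an absolute timestamp.
 */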
1010
d3ae3cfb
SN
1011static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1012 struct igc_tx_buffer *first,
1013 u32 vlan_macip_lens, u32 type_tucmd,
1014 u32 mss_l4len_idx)
1015{
1016 struct igc_adv_tx_context_desc *context_desc;
1017 u16 i = tx_ring->next_to_use;
d3ae3cfb
SN
1018
1019 context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1020
1021 i++;
1022 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1023
1024 /* set bits to identify this as an advanced context descriptor */
1025 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1026
93d85dc5 1027 /* For i225, context index must be unique per ring. */
d3ae3cfb
SN
1028 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1029 mss_l4len_idx |= tx_ring->reg_idx << 4;
1030
1031 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1032 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1033 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1034
1035 /* We assume there is always a valid Tx time available. Invalid times
1036 * should have been handled by the upper layers.
1037 */
1038 if (tx_ring->launchtime_enable) {
82faa9b7
VCG
1039 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1040 ktime_t txtime = first->skb->tstamp;
1041
847cbfc0 1042 skb_txtime_consumed(first->skb);
82faa9b7
VCG
1043 context_desc->launch_time = igc_tx_launchtime(adapter,
1044 txtime);
d3ae3cfb
SN
1045 } else {
1046 context_desc->launch_time = 0;
1047 }
1048}
1049
0507ef8a
SN
1050static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
1051{
d3ae3cfb
SN
1052 struct sk_buff *skb = first->skb;
1053 u32 vlan_macip_lens = 0;
1054 u32 type_tucmd = 0;
1055
1056 if (skb->ip_summed != CHECKSUM_PARTIAL) {
1057csum_failed:
1058 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1059 !tx_ring->launchtime_enable)
1060 return;
1061 goto no_csum;
1062 }
1063
1064 switch (skb->csum_offset) {
1065 case offsetof(struct tcphdr, check):
1066 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
5463fce6 1067 fallthrough;
d3ae3cfb
SN
1068 case offsetof(struct udphdr, check):
1069 break;
1070 case offsetof(struct sctphdr, checksum):
1071 /* validate that this is actually an SCTP request */
609d29a9 1072 if (skb_csum_is_sctp(skb)) {
d3ae3cfb
SN
1073 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1074 break;
1075 }
5463fce6 1076 fallthrough;
d3ae3cfb
SN
1077 default:
1078 skb_checksum_help(skb);
1079 goto csum_failed;
1080 }
1081
1082 /* update TX checksum flag */
1083 first->tx_flags |= IGC_TX_FLAGS_CSUM;
1084 vlan_macip_lens = skb_checksum_start_offset(skb) -
1085 skb_network_offset(skb);
1086no_csum:
1087 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1088 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1089
1090 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
0507ef8a
SN
1091}
1092
1093static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1094{
1095 struct net_device *netdev = tx_ring->netdev;
1096
1097 netif_stop_subqueue(netdev, tx_ring->queue_index);
1098
 1099 /* memory barrier: order the queue stop before the re-check below */
1100 smp_mb();
1101
 1102 /* We need to check again in case another CPU has just
1103 * made room available.
1104 */
1105 if (igc_desc_unused(tx_ring) < size)
1106 return -EBUSY;
1107
1108 /* A reprieve! */
1109 netif_wake_subqueue(netdev, tx_ring->queue_index);
1110
1111 u64_stats_update_begin(&tx_ring->tx_syncp2);
1112 tx_ring->tx_stats.restart_queue2++;
1113 u64_stats_update_end(&tx_ring->tx_syncp2);
1114
1115 return 0;
1116}
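/* Ordering sketch (the Tx completion side is not shown in this
 * excerpt): the smp_mb() above makes the stopped queue visible before
 * the free-descriptor count is re-read, while the cleanup path is
 * expected to free descriptors before it checks the stopped state.
 * Either this function sees the freed space and wakes the queue
 * itself, or the cleanup path sees the stop and issues the wake, so a
 * wakeup cannot be lost.
 */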
1117
1118static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1119{
1120 if (igc_desc_unused(tx_ring) >= size)
1121 return 0;
1122 return __igc_maybe_stop_tx(tx_ring, size);
1123}
1124
2c344ae2
VCG
1125#define IGC_SET_FLAG(_input, _flag, _result) \
1126 (((_flag) <= (_result)) ? \
1127 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
1128 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
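/* Example: IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, IGC_ADVTXD_DCMD_VLE)
 * evaluates to IGC_ADVTXD_DCMD_VLE when the VLAN flag is set and to 0
 * otherwise: the masked input bit is rescaled to the output bit with a
 * single multiply or divide of the two power-of-two masks, so no
 * conditional branch is needed.
 */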
1129
8d744963 1130static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
0507ef8a
SN
1131{
1132 /* set type for advanced descriptor with frame checksum insertion */
1133 u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1134 IGC_ADVTXD_DCMD_DEXT |
1135 IGC_ADVTXD_DCMD_IFCS;
1136
8d744963
MHZ
1137 /* set HW vlan bit if vlan is present */
1138 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1139 IGC_ADVTXD_DCMD_VLE);
1140
f38b782d
SN
1141 /* set segmentation bits for TSO */
1142 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1143 (IGC_ADVTXD_DCMD_TSE));
1144
2c344ae2
VCG
1145 /* set timestamp bit if present */
1146 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1147 (IGC_ADVTXD_MAC_TSTAMP));
1148
8d744963
MHZ
 1149 /* drop frame checksum (IFCS) insertion when skb->no_fcs is set */
1150 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1151
0507ef8a
SN
1152 return cmd_type;
1153}
1154
1155static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1156 union igc_adv_tx_desc *tx_desc,
1157 u32 tx_flags, unsigned int paylen)
1158{
1159 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1160
1161 /* insert L4 checksum */
1162 olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
1163 ((IGC_TXD_POPTS_TXSM << 8) /
1164 IGC_TX_FLAGS_CSUM);
1165
1166 /* insert IPv4 checksum */
1167 olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
1168 (((IGC_TXD_POPTS_IXSM << 8)) /
1169 IGC_TX_FLAGS_IPV4);
1170
1171 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1172}
1173
1174static int igc_tx_map(struct igc_ring *tx_ring,
1175 struct igc_tx_buffer *first,
1176 const u8 hdr_len)
1177{
1178 struct sk_buff *skb = first->skb;
1179 struct igc_tx_buffer *tx_buffer;
1180 union igc_adv_tx_desc *tx_desc;
1181 u32 tx_flags = first->tx_flags;
d7840976 1182 skb_frag_t *frag;
0507ef8a
SN
1183 u16 i = tx_ring->next_to_use;
1184 unsigned int data_len, size;
1185 dma_addr_t dma;
8d744963 1186 u32 cmd_type;
0507ef8a 1187
8d744963 1188 cmd_type = igc_tx_cmd_type(skb, tx_flags);
0507ef8a
SN
1189 tx_desc = IGC_TX_DESC(tx_ring, i);
1190
1191 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1192
1193 size = skb_headlen(skb);
1194 data_len = skb->data_len;
1195
1196 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1197
1198 tx_buffer = first;
1199
1200 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1201 if (dma_mapping_error(tx_ring->dev, dma))
1202 goto dma_error;
1203
1204 /* record length, and DMA address */
1205 dma_unmap_len_set(tx_buffer, len, size);
1206 dma_unmap_addr_set(tx_buffer, dma, dma);
1207
1208 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1209
1210 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1211 tx_desc->read.cmd_type_len =
1212 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1213
1214 i++;
1215 tx_desc++;
1216 if (i == tx_ring->count) {
1217 tx_desc = IGC_TX_DESC(tx_ring, 0);
1218 i = 0;
1219 }
1220 tx_desc->read.olinfo_status = 0;
1221
1222 dma += IGC_MAX_DATA_PER_TXD;
1223 size -= IGC_MAX_DATA_PER_TXD;
1224
1225 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1226 }
1227
1228 if (likely(!data_len))
1229 break;
1230
1231 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1232
1233 i++;
1234 tx_desc++;
1235 if (i == tx_ring->count) {
1236 tx_desc = IGC_TX_DESC(tx_ring, 0);
1237 i = 0;
1238 }
1239 tx_desc->read.olinfo_status = 0;
1240
1241 size = skb_frag_size(frag);
1242 data_len -= size;
1243
1244 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1245 size, DMA_TO_DEVICE);
1246
1247 tx_buffer = &tx_ring->tx_buffer_info[i];
1248 }
1249
1250 /* write last descriptor with RS and EOP bits */
1251 cmd_type |= size | IGC_TXD_DCMD;
1252 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1253
1254 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1255
1256 /* set the timestamp */
1257 first->time_stamp = jiffies;
1258
a9e51058
JK
1259 skb_tx_timestamp(skb);
1260
0507ef8a
SN
1261 /* Force memory writes to complete before letting h/w know there
1262 * are new descriptors to fetch. (Only applicable for weak-ordered
1263 * memory model archs, such as IA-64).
1264 *
1265 * We also need this memory barrier to make certain all of the
1266 * status bits have been updated before next_to_watch is written.
1267 */
1268 wmb();
1269
1270 /* set next_to_watch value indicating a packet is present */
1271 first->next_to_watch = tx_desc;
1272
1273 i++;
1274 if (i == tx_ring->count)
1275 i = 0;
1276
1277 tx_ring->next_to_use = i;
1278
1279 /* Make sure there is space in the ring for the next send. */
1280 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1281
6b16f9ee 1282 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
0507ef8a 1283 writel(i, tx_ring->tail);
0507ef8a
SN
1284 }
1285
1286 return 0;
1287dma_error:
25f06eff 1288 netdev_err(tx_ring->netdev, "TX DMA map failed\n");
0507ef8a
SN
1289 tx_buffer = &tx_ring->tx_buffer_info[i];
1290
1291 /* clear dma mappings for failed tx_buffer_info map */
1292 while (tx_buffer != first) {
1293 if (dma_unmap_len(tx_buffer, len))
61234295 1294 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
0507ef8a
SN
1295
1296 if (i-- == 0)
1297 i += tx_ring->count;
1298 tx_buffer = &tx_ring->tx_buffer_info[i];
1299 }
1300
1301 if (dma_unmap_len(tx_buffer, len))
61234295 1302 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
0507ef8a
SN
1303
1304 dev_kfree_skb_any(tx_buffer->skb);
1305 tx_buffer->skb = NULL;
1306
1307 tx_ring->next_to_use = i;
1308
1309 return -1;
1310}
1311
f38b782d
SN
1312static int igc_tso(struct igc_ring *tx_ring,
1313 struct igc_tx_buffer *first,
1314 u8 *hdr_len)
1315{
1316 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1317 struct sk_buff *skb = first->skb;
1318 union {
1319 struct iphdr *v4;
1320 struct ipv6hdr *v6;
1321 unsigned char *hdr;
1322 } ip;
1323 union {
1324 struct tcphdr *tcp;
1325 struct udphdr *udp;
1326 unsigned char *hdr;
1327 } l4;
1328 u32 paylen, l4_offset;
1329 int err;
1330
1331 if (skb->ip_summed != CHECKSUM_PARTIAL)
1332 return 0;
1333
1334 if (!skb_is_gso(skb))
1335 return 0;
1336
1337 err = skb_cow_head(skb, 0);
1338 if (err < 0)
1339 return err;
1340
1341 ip.hdr = skb_network_header(skb);
1342 l4.hdr = skb_checksum_start(skb);
1343
1344 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1345 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1346
1347 /* initialize outer IP header fields */
1348 if (ip.v4->version == 4) {
1349 unsigned char *csum_start = skb_checksum_start(skb);
1350 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1351
1352 /* IP header will have to cancel out any data that
1353 * is not a part of the outer IP header
1354 */
1355 ip.v4->check = csum_fold(csum_partial(trans_start,
1356 csum_start - trans_start,
1357 0));
1358 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1359
1360 ip.v4->tot_len = 0;
1361 first->tx_flags |= IGC_TX_FLAGS_TSO |
1362 IGC_TX_FLAGS_CSUM |
1363 IGC_TX_FLAGS_IPV4;
1364 } else {
1365 ip.v6->payload_len = 0;
1366 first->tx_flags |= IGC_TX_FLAGS_TSO |
1367 IGC_TX_FLAGS_CSUM;
1368 }
1369
1370 /* determine offset of inner transport header */
1371 l4_offset = l4.hdr - skb->data;
1372
1373 /* remove payload length from inner checksum */
1374 paylen = skb->len - l4_offset;
1375 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1376 /* compute length of segmentation header */
1377 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1378 csum_replace_by_diff(&l4.tcp->check,
1379 (__force __wsum)htonl(paylen));
1380 } else {
1381 /* compute length of segmentation header */
1382 *hdr_len = sizeof(*l4.udp) + l4_offset;
1383 csum_replace_by_diff(&l4.udp->check,
1384 (__force __wsum)htonl(paylen));
1385 }
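 /* Note on the adjustment above: for TSO the L4 checksum field holds a
  * pseudo-header checksum that still covers the full payload length;
  * csum_replace_by_diff() backs that length out so the hardware can
  * fold in each segment's own length when it computes the per-segment
  * L4 checksum.
  */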
1386
1387 /* update gso size and bytecount with header size */
1388 first->gso_segs = skb_shinfo(skb)->gso_segs;
1389 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1390
1391 /* MSS L4LEN IDX */
1392 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1393 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1394
1395 /* VLAN MACLEN IPLEN */
1396 vlan_macip_lens = l4.hdr - ip.hdr;
1397 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1398 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1399
1400 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
1401 type_tucmd, mss_l4len_idx);
1402
1403 return 1;
1404}
1405
0507ef8a
SN
1406static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1407 struct igc_ring *tx_ring)
1408{
1409 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1410 __be16 protocol = vlan_get_protocol(skb);
1411 struct igc_tx_buffer *first;
1412 u32 tx_flags = 0;
1413 unsigned short f;
1414 u8 hdr_len = 0;
f38b782d 1415 int tso = 0;
0507ef8a
SN
1416
1417 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1418 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1419 * + 2 desc gap to keep tail from touching head,
1420 * + 1 desc for context descriptor,
1421 * otherwise try next time
1422 */
1423 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
d7840976
MWO
1424 count += TXD_USE_COUNT(skb_frag_size(
1425 &skb_shinfo(skb)->frags[f]));
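 /* Rough example of the estimate above (IGC_MAX_DATA_PER_TXD assumed
  * to be 32 KiB): a TSO skb with a 1 KiB linear head and sixteen
  * 4 KiB frags needs 1 + 16 = 17 data descriptors, and
  * igc_maybe_stop_tx() below adds 3 more for the context descriptor
  * and the head/tail gap before deciding whether to stop the queue.
  */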
0507ef8a
SN
1426
1427 if (igc_maybe_stop_tx(tx_ring, count + 3)) {
1428 /* this is a hard error */
1429 return NETDEV_TX_BUSY;
1430 }
1431
1432 /* record the location of the first descriptor for this packet */
1433 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
859b4dfa 1434 first->type = IGC_TX_BUFFER_TYPE_SKB;
0507ef8a
SN
1435 first->skb = skb;
1436 first->bytecount = skb->len;
1437 first->gso_segs = 1;
1438
2c344ae2
VCG
1439 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1440 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1441
1442 /* FIXME: add support for retrieving timestamps from
1443 * the other timer registers before skipping the
1444 * timestamping request.
1445 */
1446 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
1447 !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
1448 &adapter->state)) {
1449 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1450 tx_flags |= IGC_TX_FLAGS_TSTAMP;
1451
1452 adapter->ptp_tx_skb = skb_get(skb);
1453 adapter->ptp_tx_start = jiffies;
1454 } else {
1455 adapter->tx_hwtstamp_skipped++;
1456 }
1457 }
1458
8d744963
MHZ
1459 if (skb_vlan_tag_present(skb)) {
1460 tx_flags |= IGC_TX_FLAGS_VLAN;
1461 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1462 }
1463
0507ef8a
SN
1464 /* record initial flags and protocol */
1465 first->tx_flags = tx_flags;
1466 first->protocol = protocol;
1467
f38b782d
SN
1468 tso = igc_tso(tx_ring, first, &hdr_len);
1469 if (tso < 0)
1470 goto out_drop;
1471 else if (!tso)
1472 igc_tx_csum(tx_ring, first);
0507ef8a
SN
1473
1474 igc_tx_map(tx_ring, first, hdr_len);
1475
f38b782d
SN
1476 return NETDEV_TX_OK;
1477
1478out_drop:
1479 dev_kfree_skb_any(first->skb);
1480 first->skb = NULL;
1481
0507ef8a
SN
1482 return NETDEV_TX_OK;
1483}
1484
1485static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1486 struct sk_buff *skb)
1487{
1488 unsigned int r_idx = skb->queue_mapping;
1489
1490 if (r_idx >= adapter->num_tx_queues)
1491 r_idx = r_idx % adapter->num_tx_queues;
1492
1493 return adapter->tx_ring[r_idx];
1494}
1495
c9a11c23
SN
1496static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1497 struct net_device *netdev)
1498{
0507ef8a
SN
1499 struct igc_adapter *adapter = netdev_priv(netdev);
1500
1501 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1502 * in order to meet this minimum size requirement.
1503 */
1504 if (skb->len < 17) {
1505 if (skb_padto(skb, 17))
1506 return NETDEV_TX_OK;
1507 skb->len = 17;
1508 }
1509
1510 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
c9a11c23
SN
1511}
1512
3bdd7086
SN
1513static void igc_rx_checksum(struct igc_ring *ring,
1514 union igc_adv_rx_desc *rx_desc,
1515 struct sk_buff *skb)
1516{
1517 skb_checksum_none_assert(skb);
1518
1519 /* Ignore Checksum bit is set */
1520 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1521 return;
1522
1523 /* Rx checksum disabled via ethtool */
1524 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1525 return;
1526
1527 /* TCP/UDP checksum error bit is set */
1528 if (igc_test_staterr(rx_desc,
ef8a17a2 1529 IGC_RXDEXT_STATERR_L4E |
3bdd7086
SN
1530 IGC_RXDEXT_STATERR_IPE)) {
1531 /* work around errata with sctp packets where the TCPE aka
1532 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1533 * packets (aka let the stack check the crc32c)
1534 */
1535 if (!(skb->len == 60 &&
1536 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1537 u64_stats_update_begin(&ring->rx_syncp);
1538 ring->rx_stats.csum_err++;
1539 u64_stats_update_end(&ring->rx_syncp);
1540 }
1541 /* let the stack verify checksum errors */
1542 return;
1543 }
1544 /* It must be a TCP or UDP packet with a valid checksum */
1545 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1546 IGC_RXD_STAT_UDPCS))
1547 skb->ip_summed = CHECKSUM_UNNECESSARY;
1548
25f06eff
AG
1549 netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1550 le32_to_cpu(rx_desc->wb.upper.status_error));
3bdd7086
SN
1551}
1552
0507ef8a
SN
1553static inline void igc_rx_hash(struct igc_ring *ring,
1554 union igc_adv_rx_desc *rx_desc,
1555 struct sk_buff *skb)
13b5b7fd 1556{
0507ef8a
SN
1557 if (ring->netdev->features & NETIF_F_RXHASH)
1558 skb_set_hash(skb,
1559 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1560 PKT_HASH_TYPE_L3);
13b5b7fd
SN
1561}
1562
8d744963
MHZ
1563static void igc_rx_vlan(struct igc_ring *rx_ring,
1564 union igc_adv_rx_desc *rx_desc,
1565 struct sk_buff *skb)
1566{
1567 struct net_device *dev = rx_ring->netdev;
1568 u16 vid;
1569
1570 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1571 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1572 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1573 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1574 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1575 else
1576 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1577
1578 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1579 }
1580}
1581
0507ef8a
SN
1582/**
1583 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1584 * @rx_ring: rx descriptor ring packet is being transacted on
1585 * @rx_desc: pointer to the EOP Rx descriptor
1586 * @skb: pointer to current skb being populated
1587 *
3a66abe9
AG
1588 * This function checks the ring, descriptor, and packet information in order
1589 * to populate the hash, checksum, VLAN, protocol, and other fields within the
1590 * skb.
0507ef8a
SN
1591 */
1592static void igc_process_skb_fields(struct igc_ring *rx_ring,
1593 union igc_adv_rx_desc *rx_desc,
1594 struct sk_buff *skb)
13b5b7fd 1595{
0507ef8a 1596 igc_rx_hash(rx_ring, rx_desc, skb);
13b5b7fd 1597
3bdd7086
SN
1598 igc_rx_checksum(rx_ring, rx_desc, skb);
1599
8d744963
MHZ
1600 igc_rx_vlan(rx_ring, rx_desc, skb);
1601
0507ef8a 1602 skb_record_rx_queue(skb, rx_ring->queue_index);
13b5b7fd 1603
0507ef8a
SN
1604 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1605}
1606
8d744963
MHZ
1607static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1608{
1609 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1610 struct igc_adapter *adapter = netdev_priv(netdev);
1611 struct igc_hw *hw = &adapter->hw;
1612 u32 ctrl;
1613
1614 ctrl = rd32(IGC_CTRL);
1615
1616 if (enable) {
1617 /* enable VLAN tag insert/strip */
1618 ctrl |= IGC_CTRL_VME;
1619 } else {
1620 /* disable VLAN tag insert/strip */
1621 ctrl &= ~IGC_CTRL_VME;
1622 }
1623 wr32(IGC_CTRL, ctrl);
1624}
1625
1626static void igc_restore_vlan(struct igc_adapter *adapter)
1627{
1628 igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1629}
1630
0507ef8a 1631static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
4ff32036
AG
1632 const unsigned int size,
1633 int *rx_buffer_pgcnt)
0507ef8a
SN
1634{
1635 struct igc_rx_buffer *rx_buffer;
1636
1637 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
4ff32036
AG
1638 *rx_buffer_pgcnt =
1639#if (PAGE_SIZE < 8192)
1640 page_count(rx_buffer->page);
1641#else
1642 0;
1643#endif
0507ef8a
SN
1644 prefetchw(rx_buffer->page);
1645
1646 /* we are reusing so sync this buffer for CPU use */
1647 dma_sync_single_range_for_cpu(rx_ring->dev,
1648 rx_buffer->dma,
1649 rx_buffer->page_offset,
1650 size,
1651 DMA_FROM_DEVICE);
1652
1653 rx_buffer->pagecnt_bias--;
1654
1655 return rx_buffer;
1656}
1657
613cf199
AG
1658static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1659 unsigned int truesize)
1660{
1661#if (PAGE_SIZE < 8192)
1662 buffer->page_offset ^= truesize;
1663#else
1664 buffer->page_offset += truesize;
1665#endif
1666}
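/* With PAGE_SIZE < 8192 each Rx page is split into two half-page
 * buffers, so XOR-ing page_offset with truesize (e.g. 2048 on a 4 KiB
 * page) toggles between the two halves on every use; on larger pages
 * the offset advances linearly instead.
 */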
1667
a39f5e53
AG
1668static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1669 unsigned int size)
1670{
1671 unsigned int truesize;
1672
1673#if (PAGE_SIZE < 8192)
1674 truesize = igc_rx_pg_size(ring) / 2;
1675#else
1676 truesize = ring_uses_build_skb(ring) ?
1677 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1678 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1679 SKB_DATA_ALIGN(size);
1680#endif
1681 return truesize;
1682}
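/* Example values (page-size assumptions, not taken from the driver):
 * on a 4 KiB-page system truesize is half the Rx page - 2048 bytes for
 * an order-0 page, or 4096 when large (order-1) buffers are in use -
 * whereas on >= 8 KiB pages it is the aligned buffer size, plus
 * headroom and shared-info overhead when build_skb() is used.
 */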
1683
0507ef8a
SN
1684/**
1685 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1686 * @rx_ring: rx descriptor ring to transact packets on
1687 * @rx_buffer: buffer containing page to add
1688 * @skb: sk_buff to place the data into
1689 * @size: size of buffer to be added
1690 *
1691 * This function will add the data contained in rx_buffer->page to the skb.
1692 */
1693static void igc_add_rx_frag(struct igc_ring *rx_ring,
1694 struct igc_rx_buffer *rx_buffer,
1695 struct sk_buff *skb,
1696 unsigned int size)
1697{
613cf199 1698 unsigned int truesize;
0507ef8a 1699
613cf199
AG
1700#if (PAGE_SIZE < 8192)
1701 truesize = igc_rx_pg_size(rx_ring) / 2;
0507ef8a 1702#else
613cf199
AG
1703 truesize = ring_uses_build_skb(rx_ring) ?
1704 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1705 SKB_DATA_ALIGN(size);
1706#endif
0507ef8a
SN
1707 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1708 rx_buffer->page_offset, size, truesize);
613cf199
AG
1709
1710 igc_rx_buffer_flip(rx_buffer, truesize);
0507ef8a
SN
1711}
1712
1713static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1714 struct igc_rx_buffer *rx_buffer,
1715 union igc_adv_rx_desc *rx_desc,
1716 unsigned int size)
1717{
1718 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
a39f5e53 1719 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
0507ef8a
SN
1720 struct sk_buff *skb;
1721
1722 /* prefetch first cache line of first page */
f468f21b 1723 net_prefetch(va);
0507ef8a
SN
1724
1725 /* build an skb around the page buffer */
1726 skb = build_skb(va - IGC_SKB_PAD, truesize);
1727 if (unlikely(!skb))
1728 return NULL;
1729
1730 /* update pointers within the skb to store the data */
1731 skb_reserve(skb, IGC_SKB_PAD);
bb9089b6 1732 __skb_put(skb, size);
0507ef8a 1733
613cf199 1734 igc_rx_buffer_flip(rx_buffer, truesize);
0507ef8a
SN
1735 return skb;
1736}
1737
1738static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1739 struct igc_rx_buffer *rx_buffer,
26575105 1740 struct xdp_buff *xdp,
e1ed4f92 1741 ktime_t timestamp)
0507ef8a 1742{
26575105 1743 unsigned int size = xdp->data_end - xdp->data;
a39f5e53 1744 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
26575105 1745 void *va = xdp->data;
0507ef8a
SN
1746 unsigned int headlen;
1747 struct sk_buff *skb;
1748
1749 /* prefetch first cache line of first page */
f468f21b 1750 net_prefetch(va);
0507ef8a
SN
1751
1752 /* allocate a skb to store the frags */
1753 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
1754 if (unlikely(!skb))
1755 return NULL;
1756
e1ed4f92
AG
1757 if (timestamp)
1758 skb_hwtstamps(skb)->hwtstamp = timestamp;
81b05520 1759
0507ef8a
SN
1760 /* Determine available headroom for copy */
1761 headlen = size;
1762 if (headlen > IGC_RX_HDR_LEN)
c43f1255 1763 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
0507ef8a
SN
1764
1765 /* align pull length to size of long to optimize memcpy performance */
1766 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1767
1768 /* update all of the pointers */
1769 size -= headlen;
1770 if (size) {
1771 skb_add_rx_frag(skb, 0, rx_buffer->page,
1772 (va + headlen) - page_address(rx_buffer->page),
1773 size, truesize);
613cf199 1774 igc_rx_buffer_flip(rx_buffer, truesize);
0507ef8a
SN
1775 } else {
1776 rx_buffer->pagecnt_bias++;
13b5b7fd
SN
1777 }
1778
0507ef8a
SN
1779 return skb;
1780}
13b5b7fd 1781
0507ef8a
SN
1782/**
1783 * igc_reuse_rx_page - page flip buffer and store it back on the ring
1784 * @rx_ring: rx descriptor ring to store buffers on
1785 * @old_buff: donor buffer to have page reused
1786 *
1787 * Synchronizes page for reuse by the adapter
1788 */
1789static void igc_reuse_rx_page(struct igc_ring *rx_ring,
1790 struct igc_rx_buffer *old_buff)
1791{
1792 u16 nta = rx_ring->next_to_alloc;
1793 struct igc_rx_buffer *new_buff;
1794
1795 new_buff = &rx_ring->rx_buffer_info[nta];
1796
1797 /* update, and store next to alloc */
1798 nta++;
1799 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1800
1801 /* Transfer page from old buffer to new buffer.
1802 * Move each member individually to avoid possible store
1803 * forwarding stalls.
13b5b7fd 1804 */
0507ef8a
SN
1805 new_buff->dma = old_buff->dma;
1806 new_buff->page = old_buff->page;
1807 new_buff->page_offset = old_buff->page_offset;
1808 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1809}
13b5b7fd 1810
4ff32036
AG
1811static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
1812 int rx_buffer_pgcnt)
0507ef8a
SN
1813{
1814 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1815 struct page *page = rx_buffer->page;
1816
a79afa78
AL
1817 /* avoid re-using remote and pfmemalloc pages */
1818 if (!dev_page_is_reusable(page))
0507ef8a
SN
1819 return false;
1820
1821#if (PAGE_SIZE < 8192)
 1822 /* if we are the only owner of the page, we can reuse it */
4ff32036 1823 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
13b5b7fd 1824 return false;
0507ef8a
SN
1825#else
1826#define IGC_LAST_OFFSET \
1827 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1828
1829 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1830 return false;
1831#endif
1832
1833 /* If we have drained the page fragment pool we need to update
1834 * the pagecnt_bias and page count so that we fully restock the
1835 * number of references the driver holds.
1836 */
4ff32036
AG
1837 if (unlikely(pagecnt_bias == 1)) {
1838 page_ref_add(page, USHRT_MAX - 1);
0507ef8a 1839 rx_buffer->pagecnt_bias = USHRT_MAX;
13b5b7fd
SN
1840 }
1841
0507ef8a
SN
1842 return true;
1843}
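
/* Illustrative note, not part of the upstream driver: a worked example of
 * the reference accounting checked above. igc_alloc_mapped_page() leaves
 * both page_ref_count() and pagecnt_bias at USHRT_MAX, and the receive path
 * decrements pagecnt_bias rather than the atomic refcount when it hands a
 * fragment out. page_count(page) - pagecnt_bias is therefore the number of
 * fragments currently referenced outside the driver; if that exceeds the one
 * just delivered, the half page cannot be flipped and reused. Once the bias
 * drains down to 1, the single page_ref_add(page, USHRT_MAX - 1) above
 * restocks both counters in one atomic operation.
 */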
1844
1845/**
1846 * igc_is_non_eop - process handling of non-EOP buffers
1847 * @rx_ring: Rx ring being processed
1848 * @rx_desc: Rx descriptor for current buffer
0507ef8a
SN
1849 *
 1850 * This function advances the ring's next_to_clean index and prefetches
 1851 * the next descriptor. If the current buffer is an EOP buffer it returns
 1852 * false; otherwise it returns true to indicate that the frame continues
 1853 * in the following buffer.
1854 */
1855static bool igc_is_non_eop(struct igc_ring *rx_ring,
1856 union igc_adv_rx_desc *rx_desc)
1857{
1858 u32 ntc = rx_ring->next_to_clean + 1;
1859
1860 /* fetch, update, and store next to clean */
1861 ntc = (ntc < rx_ring->count) ? ntc : 0;
1862 rx_ring->next_to_clean = ntc;
1863
1864 prefetch(IGC_RX_DESC(rx_ring, ntc));
1865
1866 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1867 return false;
13b5b7fd
SN
1868
1869 return true;
1870}
1871
0507ef8a
SN
1872/**
1873 * igc_cleanup_headers - Correct corrupted or empty headers
1874 * @rx_ring: rx descriptor ring packet is being transacted on
1875 * @rx_desc: pointer to the EOP Rx descriptor
1876 * @skb: pointer to current skb being fixed
1877 *
1878 * Address the case where we are pulling data in on pages only
1879 * and as such no data is present in the skb header.
1880 *
 1881 * In addition, if the skb is not at least 60 bytes we need to pad it so
 1882 * that it is large enough to qualify as a valid Ethernet frame.
1883 *
1884 * Returns true if an error was encountered and skb was freed.
1885 */
1886static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1887 union igc_adv_rx_desc *rx_desc,
1888 struct sk_buff *skb)
1889{
26575105
AG
1890 /* XDP packets use error pointer so abort at this point */
1891 if (IS_ERR(skb))
1892 return true;
1893
ef8a17a2 1894 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
0507ef8a
SN
1895 struct net_device *netdev = rx_ring->netdev;
1896
1897 if (!(netdev->features & NETIF_F_RXALL)) {
1898 dev_kfree_skb_any(skb);
1899 return true;
1900 }
1901 }
1902
1903 /* if eth_skb_pad returns an error the skb was freed */
1904 if (eth_skb_pad(skb))
1905 return true;
1906
1907 return false;
1908}
1909
1910static void igc_put_rx_buffer(struct igc_ring *rx_ring,
4ff32036
AG
1911 struct igc_rx_buffer *rx_buffer,
1912 int rx_buffer_pgcnt)
0507ef8a 1913{
4ff32036 1914 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
0507ef8a
SN
1915 /* hand second half of page back to the ring */
1916 igc_reuse_rx_page(rx_ring, rx_buffer);
1917 } else {
1918 /* We are not reusing the buffer so unmap it and free
1919 * any references we are holding to it
1920 */
1921 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1922 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1923 IGC_RX_DMA_ATTR);
1924 __page_frag_cache_drain(rx_buffer->page,
1925 rx_buffer->pagecnt_bias);
1926 }
1927
1928 /* clear contents of rx_buffer */
1929 rx_buffer->page = NULL;
1930}
1931
aac8f68c
SN
1932static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1933{
26575105
AG
1934 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
1935
1936 if (ring_uses_build_skb(rx_ring))
1937 return IGC_SKB_PAD;
1938 if (igc_xdp_is_enabled(adapter))
1939 return XDP_PACKET_HEADROOM;
1940
1941 return 0;
aac8f68c
SN
1942}
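
/* Illustrative note, not part of the upstream driver: the offset returned
 * above is stored as bi->page_offset by igc_alloc_mapped_page(), giving
 * roughly this half-page layout:
 *
 *   build_skb path: [ IGC_SKB_PAD headroom ][ received frame ... ]
 *   XDP enabled:    [ XDP_PACKET_HEADROOM  ][ received frame ... ]
 *   legacy path:    [ received frame ... ]           (no headroom)
 *
 * This is why igc_clean_rx_irq() passes pktbuf - igc_rx_offset(rx_ring) to
 * xdp_prepare_buff() to recover the start of the headroom.
 */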
1943
1944static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1945 struct igc_rx_buffer *bi)
1946{
1947 struct page *page = bi->page;
1948 dma_addr_t dma;
1949
1950 /* since we are recycling buffers we should seldom need to alloc */
1951 if (likely(page))
1952 return true;
1953
1954 /* alloc new page for storage */
1955 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1956 if (unlikely(!page)) {
1957 rx_ring->rx_stats.alloc_failed++;
1958 return false;
1959 }
1960
1961 /* map page for use */
1962 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1963 igc_rx_pg_size(rx_ring),
1964 DMA_FROM_DEVICE,
1965 IGC_RX_DMA_ATTR);
1966
1967 /* if mapping failed free memory back to system since
1968 * there isn't much point in holding memory we can't use
1969 */
1970 if (dma_mapping_error(rx_ring->dev, dma)) {
1971 __free_page(page);
1972
1973 rx_ring->rx_stats.alloc_failed++;
1974 return false;
1975 }
1976
1977 bi->dma = dma;
1978 bi->page = page;
1979 bi->page_offset = igc_rx_offset(rx_ring);
4ff32036
AG
1980 page_ref_add(page, USHRT_MAX - 1);
1981 bi->pagecnt_bias = USHRT_MAX;
aac8f68c
SN
1982
1983 return true;
1984}
1985
13b5b7fd
SN
1986/**
1987 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
085c8589
SN
1988 * @rx_ring: rx descriptor ring
 1989 * @cleaned_count: number of descriptors to refill with fresh buffers
13b5b7fd
SN
1990 */
1991static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1992{
1993 union igc_adv_rx_desc *rx_desc;
1994 u16 i = rx_ring->next_to_use;
1995 struct igc_rx_buffer *bi;
1996 u16 bufsz;
1997
1998 /* nothing to do */
1999 if (!cleaned_count)
2000 return;
2001
2002 rx_desc = IGC_RX_DESC(rx_ring, i);
2003 bi = &rx_ring->rx_buffer_info[i];
2004 i -= rx_ring->count;
2005
2006 bufsz = igc_rx_bufsz(rx_ring);
2007
2008 do {
2009 if (!igc_alloc_mapped_page(rx_ring, bi))
2010 break;
2011
2012 /* sync the buffer for use by the device */
2013 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2014 bi->page_offset, bufsz,
2015 DMA_FROM_DEVICE);
2016
2017 /* Refresh the desc even if buffer_addrs didn't change
2018 * because each write-back erases this info.
2019 */
2020 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2021
2022 rx_desc++;
2023 bi++;
2024 i++;
2025 if (unlikely(!i)) {
2026 rx_desc = IGC_RX_DESC(rx_ring, 0);
2027 bi = rx_ring->rx_buffer_info;
2028 i -= rx_ring->count;
2029 }
2030
2031 /* clear the length for the next_to_use descriptor */
2032 rx_desc->wb.upper.length = 0;
2033
2034 cleaned_count--;
2035 } while (cleaned_count);
2036
2037 i += rx_ring->count;
2038
2039 if (rx_ring->next_to_use != i) {
2040 /* record the next descriptor to use */
2041 rx_ring->next_to_use = i;
2042
2043 /* update next to alloc since we have filled the ring */
2044 rx_ring->next_to_alloc = i;
2045
2046 /* Force memory writes to complete before letting h/w
2047 * know there are new descriptors to fetch. (Only
2048 * applicable for weak-ordered memory model archs,
2049 * such as IA-64).
2050 */
2051 wmb();
2052 writel(i, rx_ring->tail);
2053 }
2054}
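
/* Illustrative note, not part of the upstream driver: the negative-index
 * trick above keeps a modulo out of the refill loop. With count = 256 and
 * next_to_use = 250, i starts at 250 - 256 = -6; after six descriptors the
 * (unlikely(!i)) branch rewinds rx_desc/bi to the ring base and sets i back
 * to -256, and the final "i += rx_ring->count" converts it into the real
 * ring index that is stored in next_to_use and written to the tail register.
 */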
2055
fc9df2a0
AG
2056static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2057{
2058 union igc_adv_rx_desc *desc;
2059 u16 i = ring->next_to_use;
2060 struct igc_rx_buffer *bi;
2061 dma_addr_t dma;
2062 bool ok = true;
2063
2064 if (!count)
2065 return ok;
2066
2067 desc = IGC_RX_DESC(ring, i);
2068 bi = &ring->rx_buffer_info[i];
2069 i -= ring->count;
2070
2071 do {
2072 bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2073 if (!bi->xdp) {
2074 ok = false;
2075 break;
2076 }
2077
2078 dma = xsk_buff_xdp_get_dma(bi->xdp);
2079 desc->read.pkt_addr = cpu_to_le64(dma);
2080
2081 desc++;
2082 bi++;
2083 i++;
2084 if (unlikely(!i)) {
2085 desc = IGC_RX_DESC(ring, 0);
2086 bi = ring->rx_buffer_info;
2087 i -= ring->count;
2088 }
2089
2090 /* Clear the length for the next_to_use descriptor. */
2091 desc->wb.upper.length = 0;
2092
2093 count--;
2094 } while (count);
2095
2096 i += ring->count;
2097
2098 if (ring->next_to_use != i) {
2099 ring->next_to_use = i;
2100
2101 /* Force memory writes to complete before letting h/w
2102 * know there are new descriptors to fetch. (Only
2103 * applicable for weak-ordered memory model archs,
2104 * such as IA-64).
2105 */
2106 wmb();
2107 writel(i, ring->tail);
2108 }
2109
2110 return ok;
2111}
2112
73f1071c
AG
2113static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
2114 struct xdp_frame *xdpf,
2115 struct igc_ring *ring)
2116{
2117 dma_addr_t dma;
2118
2119 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
2120 if (dma_mapping_error(ring->dev, dma)) {
2121 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
2122 return -ENOMEM;
2123 }
2124
859b4dfa 2125 buffer->type = IGC_TX_BUFFER_TYPE_XDP;
73f1071c 2126 buffer->xdpf = xdpf;
73f1071c
AG
2127 buffer->protocol = 0;
2128 buffer->bytecount = xdpf->len;
2129 buffer->gso_segs = 1;
2130 buffer->time_stamp = jiffies;
2131 dma_unmap_len_set(buffer, len, xdpf->len);
2132 dma_unmap_addr_set(buffer, dma, dma);
2133 return 0;
2134}
2135
2136/* This function requires __netif_tx_lock is held by the caller. */
2137static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2138 struct xdp_frame *xdpf)
2139{
2140 struct igc_tx_buffer *buffer;
2141 union igc_adv_tx_desc *desc;
2142 u32 cmd_type, olinfo_status;
2143 int err;
2144
2145 if (!igc_desc_unused(ring))
2146 return -EBUSY;
2147
2148 buffer = &ring->tx_buffer_info[ring->next_to_use];
2149 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
2150 if (err)
2151 return err;
2152
2153 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2154 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2155 buffer->bytecount;
2156 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2157
2158 desc = IGC_TX_DESC(ring, ring->next_to_use);
2159 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2160 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2161 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
2162
2163 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
2164
2165 buffer->next_to_watch = desc;
2166
2167 ring->next_to_use++;
2168 if (ring->next_to_use == ring->count)
2169 ring->next_to_use = 0;
2170
2171 return 0;
2172}
2173
2174static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2175 int cpu)
2176{
2177 int index = cpu;
2178
2179 if (unlikely(index < 0))
2180 index = 0;
2181
2182 while (index >= adapter->num_tx_queues)
2183 index -= adapter->num_tx_queues;
2184
2185 return adapter->tx_ring[index];
2186}
2187
2188static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2189{
2190 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2191 int cpu = smp_processor_id();
2192 struct netdev_queue *nq;
2193 struct igc_ring *ring;
2194 int res;
2195
2196 if (unlikely(!xdpf))
2197 return -EFAULT;
2198
2199 ring = igc_xdp_get_tx_ring(adapter, cpu);
2200 nq = txring_txq(ring);
2201
2202 __netif_tx_lock(nq, cpu);
2203 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2204 __netif_tx_unlock(nq);
2205 return res;
2206}
2207
73a6e372
AG
2208/* This function assumes rcu_read_lock() is held by the caller. */
2209static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2210 struct bpf_prog *prog,
2211 struct xdp_buff *xdp)
26575105 2212{
73a6e372 2213 u32 act = bpf_prog_run_xdp(prog, xdp);
26575105 2214
26575105
AG
2215 switch (act) {
2216 case XDP_PASS:
73a6e372 2217 return IGC_XDP_PASS;
73f1071c
AG
2218 case XDP_TX:
2219 if (igc_xdp_xmit_back(adapter, xdp) < 0)
45ce0859 2220 goto out_failure;
12628565 2221 return IGC_XDP_TX;
4ff32036
AG
2222 case XDP_REDIRECT:
2223 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
45ce0859 2224 goto out_failure;
12628565 2225 return IGC_XDP_REDIRECT;
26575105
AG
2227 default:
2228 bpf_warn_invalid_xdp_action(act);
2229 fallthrough;
2230 case XDP_ABORTED:
45ce0859 2231out_failure:
26575105
AG
2232 trace_xdp_exception(adapter->netdev, prog, act);
2233 fallthrough;
2234 case XDP_DROP:
73a6e372 2235 return IGC_XDP_CONSUMED;
26575105 2236 }
73a6e372
AG
2237}
2238
2239static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2240 struct xdp_buff *xdp)
2241{
2242 struct bpf_prog *prog;
2243 int res;
2244
73a6e372
AG
2245 prog = READ_ONCE(adapter->xdp_prog);
2246 if (!prog) {
2247 res = IGC_XDP_PASS;
49589b23 2248 goto out;
73a6e372
AG
2249 }
2250
2251 res = __igc_xdp_run_prog(adapter, prog, xdp);
26575105 2252
49589b23 2253out:
26575105
AG
2254 return ERR_PTR(-res);
2255}
2256
73f1071c
AG
2257/* This function assumes __netif_tx_lock is held by the caller. */
2258static void igc_flush_tx_descriptors(struct igc_ring *ring)
2259{
2260 /* Once tail pointer is updated, hardware can fetch the descriptors
2261 * any time so we issue a write membar here to ensure all memory
2262 * writes are complete before the tail pointer is updated.
2263 */
2264 wmb();
2265 writel(ring->next_to_use, ring->tail);
2266}
2267
2268static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2269{
2270 int cpu = smp_processor_id();
2271 struct netdev_queue *nq;
2272 struct igc_ring *ring;
2273
2274 if (status & IGC_XDP_TX) {
2275 ring = igc_xdp_get_tx_ring(adapter, cpu);
2276 nq = txring_txq(ring);
2277
2278 __netif_tx_lock(nq, cpu);
2279 igc_flush_tx_descriptors(ring);
2280 __netif_tx_unlock(nq);
2281 }
4ff32036
AG
2282
2283 if (status & IGC_XDP_REDIRECT)
2284 xdp_do_flush();
73f1071c
AG
2285}
2286
a27e6e73
AG
2287static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2288 unsigned int packets, unsigned int bytes)
2289{
2290 struct igc_ring *ring = q_vector->rx.ring;
2291
2292 u64_stats_update_begin(&ring->rx_syncp);
2293 ring->rx_stats.packets += packets;
2294 ring->rx_stats.bytes += bytes;
2295 u64_stats_update_end(&ring->rx_syncp);
2296
2297 q_vector->rx.total_packets += packets;
2298 q_vector->rx.total_bytes += bytes;
2299}
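
/* Illustrative sketch only, not part of the upstream driver: the reader side
 * that pairs with the u64_stats_update_begin()/end() section above. The
 * function name is hypothetical; the real driver folds these counters into
 * its stats reporting elsewhere.
 */
static inline u64 igc_example_read_rx_packets(const struct igc_ring *ring)
{
	unsigned int start;
	u64 packets;

	do {
		/* retry if an update raced with the read */
		start = u64_stats_fetch_begin(&ring->rx_syncp);
		packets = ring->rx_stats.packets;
	} while (u64_stats_fetch_retry(&ring->rx_syncp, start));

	return packets;
}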
2300
0507ef8a
SN
2301static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2302{
2303 unsigned int total_bytes = 0, total_packets = 0;
73f1071c 2304 struct igc_adapter *adapter = q_vector->adapter;
0507ef8a
SN
2305 struct igc_ring *rx_ring = q_vector->rx.ring;
2306 struct sk_buff *skb = rx_ring->skb;
2307 u16 cleaned_count = igc_desc_unused(rx_ring);
4ff32036 2308 int xdp_status = 0, rx_buffer_pgcnt;
0507ef8a
SN
2309
2310 while (likely(total_packets < budget)) {
2311 union igc_adv_rx_desc *rx_desc;
2312 struct igc_rx_buffer *rx_buffer;
73f1071c 2313 unsigned int size, truesize;
e1ed4f92 2314 ktime_t timestamp = 0;
26575105 2315 struct xdp_buff xdp;
e1ed4f92 2316 int pkt_offset = 0;
26575105 2317 void *pktbuf;
0507ef8a
SN
2318
2319 /* return some buffers to hardware, one at a time is too slow */
2320 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2321 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2322 cleaned_count = 0;
2323 }
2324
2325 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2326 size = le16_to_cpu(rx_desc->wb.upper.length);
2327 if (!size)
2328 break;
2329
2330 /* This memory barrier is needed to keep us from reading
2331 * any other fields out of the rx_desc until we know the
2332 * descriptor has been written back
2333 */
2334 dma_rmb();
2335
4ff32036 2336 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
73f1071c 2337 truesize = igc_get_rx_frame_truesize(rx_ring, size);
0507ef8a 2338
26575105 2339 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
e1ed4f92 2340
26575105 2341 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
e1ed4f92
AG
2342 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2343 pktbuf);
2344 pkt_offset = IGC_TS_HDR_LEN;
2345 size -= IGC_TS_HDR_LEN;
2346 }
2347
26575105 2348 if (!skb) {
082294f2
MC
2349 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
2350 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
2351 igc_rx_offset(rx_ring) + pkt_offset, size, false);
26575105
AG
2352
2353 skb = igc_xdp_run_prog(adapter, &xdp);
2354 }
2355
2356 if (IS_ERR(skb)) {
73f1071c
AG
2357 unsigned int xdp_res = -PTR_ERR(skb);
2358
2359 switch (xdp_res) {
2360 case IGC_XDP_CONSUMED:
2361 rx_buffer->pagecnt_bias++;
2362 break;
2363 case IGC_XDP_TX:
4ff32036 2364 case IGC_XDP_REDIRECT:
73f1071c
AG
2365 igc_rx_buffer_flip(rx_buffer, truesize);
2366 xdp_status |= xdp_res;
2367 break;
2368 }
2369
26575105
AG
2370 total_packets++;
2371 total_bytes += size;
2372 } else if (skb)
0507ef8a
SN
2373 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2374 else if (ring_uses_build_skb(rx_ring))
2375 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
2376 else
26575105
AG
2377 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
2378 timestamp);
0507ef8a
SN
2379
2380 /* exit if we failed to retrieve a buffer */
2381 if (!skb) {
2382 rx_ring->rx_stats.alloc_failed++;
2383 rx_buffer->pagecnt_bias++;
2384 break;
2385 }
2386
4ff32036 2387 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
0507ef8a
SN
2388 cleaned_count++;
2389
2390 /* fetch next buffer in frame if non-eop */
2391 if (igc_is_non_eop(rx_ring, rx_desc))
2392 continue;
2393
2394 /* verify the packet layout is correct */
2395 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2396 skb = NULL;
2397 continue;
2398 }
2399
2400 /* probably a little skewed due to removing CRC */
2401 total_bytes += skb->len;
2402
3a66abe9 2403 /* populate checksum, VLAN, and protocol */
0507ef8a
SN
2404 igc_process_skb_fields(rx_ring, rx_desc, skb);
2405
2406 napi_gro_receive(&q_vector->napi, skb);
2407
2408 /* reset skb pointer */
2409 skb = NULL;
2410
2411 /* update budget accounting */
2412 total_packets++;
2413 }
2414
73f1071c
AG
2415 if (xdp_status)
2416 igc_finalize_xdp(adapter, xdp_status);
2417
0507ef8a
SN
2418 /* place incomplete frames back on ring for completion */
2419 rx_ring->skb = skb;
2420
a27e6e73 2421 igc_update_rx_stats(q_vector, total_packets, total_bytes);
0507ef8a
SN
2422
2423 if (cleaned_count)
2424 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2425
2426 return total_packets;
2427}
2428
fc9df2a0
AG
2429static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2430 struct xdp_buff *xdp)
2431{
2432 unsigned int metasize = xdp->data - xdp->data_meta;
2433 unsigned int datasize = xdp->data_end - xdp->data;
2434 unsigned int totalsize = metasize + datasize;
2435 struct sk_buff *skb;
2436
2437 skb = __napi_alloc_skb(&ring->q_vector->napi,
2438 xdp->data_end - xdp->data_hard_start,
2439 GFP_ATOMIC | __GFP_NOWARN);
2440 if (unlikely(!skb))
2441 return NULL;
2442
2443 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
2444 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
2445 if (metasize)
2446 skb_metadata_set(skb, metasize);
2447
2448 return skb;
2449}
2450
2451static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2452 union igc_adv_rx_desc *desc,
2453 struct xdp_buff *xdp,
2454 ktime_t timestamp)
2455{
2456 struct igc_ring *ring = q_vector->rx.ring;
2457 struct sk_buff *skb;
2458
2459 skb = igc_construct_skb_zc(ring, xdp);
2460 if (!skb) {
2461 ring->rx_stats.alloc_failed++;
2462 return;
2463 }
2464
2465 if (timestamp)
2466 skb_hwtstamps(skb)->hwtstamp = timestamp;
2467
2468 if (igc_cleanup_headers(ring, desc, skb))
2469 return;
2470
2471 igc_process_skb_fields(ring, desc, skb);
2472 napi_gro_receive(&q_vector->napi, skb);
2473}
2474
2475static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2476{
2477 struct igc_adapter *adapter = q_vector->adapter;
2478 struct igc_ring *ring = q_vector->rx.ring;
2479 u16 cleaned_count = igc_desc_unused(ring);
2480 int total_bytes = 0, total_packets = 0;
2481 u16 ntc = ring->next_to_clean;
2482 struct bpf_prog *prog;
2483 bool failure = false;
2484 int xdp_status = 0;
2485
2486 rcu_read_lock();
2487
2488 prog = READ_ONCE(adapter->xdp_prog);
2489
2490 while (likely(total_packets < budget)) {
2491 union igc_adv_rx_desc *desc;
2492 struct igc_rx_buffer *bi;
2493 ktime_t timestamp = 0;
2494 unsigned int size;
2495 int res;
2496
2497 desc = IGC_RX_DESC(ring, ntc);
2498 size = le16_to_cpu(desc->wb.upper.length);
2499 if (!size)
2500 break;
2501
2502 /* This memory barrier is needed to keep us from reading
2503 * any other fields out of the rx_desc until we know the
2504 * descriptor has been written back
2505 */
2506 dma_rmb();
2507
2508 bi = &ring->rx_buffer_info[ntc];
2509
2510 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2511 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2512 bi->xdp->data);
2513
2514 bi->xdp->data += IGC_TS_HDR_LEN;
2515
 2516 			/* The HW timestamp has been copied into the local variable, so
 2517 			 * the metadata length seen by the XDP program should be 0.
 2518 			 */
2519 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2520 size -= IGC_TS_HDR_LEN;
2521 }
2522
2523 bi->xdp->data_end = bi->xdp->data + size;
2524 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2525
2526 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2527 switch (res) {
2528 case IGC_XDP_PASS:
2529 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2530 fallthrough;
2531 case IGC_XDP_CONSUMED:
2532 xsk_buff_free(bi->xdp);
2533 break;
2534 case IGC_XDP_TX:
2535 case IGC_XDP_REDIRECT:
2536 xdp_status |= res;
2537 break;
2538 }
2539
2540 bi->xdp = NULL;
2541 total_bytes += size;
2542 total_packets++;
2543 cleaned_count++;
2544 ntc++;
2545 if (ntc == ring->count)
2546 ntc = 0;
2547 }
2548
2549 ring->next_to_clean = ntc;
2550 rcu_read_unlock();
2551
2552 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2553 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2554
2555 if (xdp_status)
2556 igc_finalize_xdp(adapter, xdp_status);
2557
2558 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2559
2560 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2561 if (failure || ring->next_to_clean == ring->next_to_use)
2562 xsk_set_rx_need_wakeup(ring->xsk_pool);
2563 else
2564 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2565 return total_packets;
2566 }
2567
2568 return failure ? budget : total_packets;
2569}
2570
a27e6e73
AG
2571static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2572 unsigned int packets, unsigned int bytes)
2573{
2574 struct igc_ring *ring = q_vector->tx.ring;
2575
2576 u64_stats_update_begin(&ring->tx_syncp);
2577 ring->tx_stats.bytes += bytes;
2578 ring->tx_stats.packets += packets;
2579 u64_stats_update_end(&ring->tx_syncp);
2580
2581 q_vector->tx.total_bytes += bytes;
2582 q_vector->tx.total_packets += packets;
2583}
2584
9acf59a7
AG
2585static void igc_xdp_xmit_zc(struct igc_ring *ring)
2586{
2587 struct xsk_buff_pool *pool = ring->xsk_pool;
2588 struct netdev_queue *nq = txring_txq(ring);
2589 union igc_adv_tx_desc *tx_desc = NULL;
2590 int cpu = smp_processor_id();
2591 u16 ntu = ring->next_to_use;
2592 struct xdp_desc xdp_desc;
2593 u16 budget;
2594
2595 if (!netif_carrier_ok(ring->netdev))
2596 return;
2597
2598 __netif_tx_lock(nq, cpu);
2599
2600 budget = igc_desc_unused(ring);
2601
2602 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2603 u32 cmd_type, olinfo_status;
2604 struct igc_tx_buffer *bi;
2605 dma_addr_t dma;
2606
2607 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2608 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2609 xdp_desc.len;
2610 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2611
2612 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2613 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2614
2615 tx_desc = IGC_TX_DESC(ring, ntu);
2616 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2617 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2618 tx_desc->read.buffer_addr = cpu_to_le64(dma);
2619
2620 bi = &ring->tx_buffer_info[ntu];
2621 bi->type = IGC_TX_BUFFER_TYPE_XSK;
2622 bi->protocol = 0;
2623 bi->bytecount = xdp_desc.len;
2624 bi->gso_segs = 1;
2625 bi->time_stamp = jiffies;
2626 bi->next_to_watch = tx_desc;
2627
2628 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2629
2630 ntu++;
2631 if (ntu == ring->count)
2632 ntu = 0;
2633 }
2634
2635 ring->next_to_use = ntu;
2636 if (tx_desc) {
2637 igc_flush_tx_descriptors(ring);
2638 xsk_tx_release(pool);
2639 }
2640
2641 __netif_tx_unlock(nq);
2642}
2643
0507ef8a
SN
2644/**
2645 * igc_clean_tx_irq - Reclaim resources after transmit completes
2646 * @q_vector: pointer to q_vector containing needed info
2647 * @napi_budget: Used to determine if we are in netpoll
2648 *
2649 * returns true if ring is completely cleaned
2650 */
2651static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2652{
2653 struct igc_adapter *adapter = q_vector->adapter;
2654 unsigned int total_bytes = 0, total_packets = 0;
2655 unsigned int budget = q_vector->tx.work_limit;
2656 struct igc_ring *tx_ring = q_vector->tx.ring;
2657 unsigned int i = tx_ring->next_to_clean;
2658 struct igc_tx_buffer *tx_buffer;
2659 union igc_adv_tx_desc *tx_desc;
9acf59a7 2660 u32 xsk_frames = 0;
0507ef8a
SN
2661
2662 if (test_bit(__IGC_DOWN, &adapter->state))
2663 return true;
2664
2665 tx_buffer = &tx_ring->tx_buffer_info[i];
2666 tx_desc = IGC_TX_DESC(tx_ring, i);
2667 i -= tx_ring->count;
2668
2669 do {
2670 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2671
2672 /* if next_to_watch is not set then there is no work pending */
2673 if (!eop_desc)
2674 break;
2675
2676 /* prevent any other reads prior to eop_desc */
2677 smp_rmb();
2678
2679 /* if DD is not set pending work has not been completed */
2680 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2681 break;
2682
2683 /* clear next_to_watch to prevent false hangs */
2684 tx_buffer->next_to_watch = NULL;
2685
2686 /* update the statistics for this packet */
2687 total_bytes += tx_buffer->bytecount;
2688 total_packets += tx_buffer->gso_segs;
2689
859b4dfa 2690 switch (tx_buffer->type) {
9acf59a7
AG
2691 case IGC_TX_BUFFER_TYPE_XSK:
2692 xsk_frames++;
2693 break;
859b4dfa 2694 case IGC_TX_BUFFER_TYPE_XDP:
73f1071c 2695 xdp_return_frame(tx_buffer->xdpf);
9acf59a7 2696 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
859b4dfa
AG
2697 break;
2698 case IGC_TX_BUFFER_TYPE_SKB:
73f1071c 2699 napi_consume_skb(tx_buffer->skb, napi_budget);
9acf59a7 2700 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
859b4dfa
AG
2701 break;
2702 default:
2703 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2704 break;
2705 }
0507ef8a 2706
0507ef8a
SN
2707 /* clear last DMA location and unmap remaining buffers */
2708 while (tx_desc != eop_desc) {
2709 tx_buffer++;
2710 tx_desc++;
2711 i++;
2712 if (unlikely(!i)) {
2713 i -= tx_ring->count;
2714 tx_buffer = tx_ring->tx_buffer_info;
2715 tx_desc = IGC_TX_DESC(tx_ring, 0);
2716 }
2717
2718 /* unmap any remaining paged data */
61234295
AG
2719 if (dma_unmap_len(tx_buffer, len))
2720 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
0507ef8a
SN
2721 }
2722
2723 /* move us one more past the eop_desc for start of next pkt */
2724 tx_buffer++;
2725 tx_desc++;
2726 i++;
2727 if (unlikely(!i)) {
2728 i -= tx_ring->count;
2729 tx_buffer = tx_ring->tx_buffer_info;
2730 tx_desc = IGC_TX_DESC(tx_ring, 0);
2731 }
2732
2733 /* issue prefetch for next Tx descriptor */
2734 prefetch(tx_desc);
2735
2736 /* update budget accounting */
2737 budget--;
2738 } while (likely(budget));
2739
2740 netdev_tx_completed_queue(txring_txq(tx_ring),
2741 total_packets, total_bytes);
2742
2743 i += tx_ring->count;
2744 tx_ring->next_to_clean = i;
a27e6e73
AG
2745
2746 igc_update_tx_stats(q_vector, total_packets, total_bytes);
0507ef8a 2747
9acf59a7
AG
2748 if (tx_ring->xsk_pool) {
2749 if (xsk_frames)
2750 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
2751 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
2752 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
2753 igc_xdp_xmit_zc(tx_ring);
2754 }
2755
0507ef8a
SN
2756 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2757 struct igc_hw *hw = &adapter->hw;
2758
 2759 		/* Detect a transmit hang in hardware; this serializes the
2760 * check with the clearing of time_stamp and movement of i
2761 */
2762 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2763 if (tx_buffer->next_to_watch &&
2764 time_after(jiffies, tx_buffer->time_stamp +
2765 (adapter->tx_timeout_factor * HZ)) &&
2766 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2767 /* detected Tx unit hang */
25f06eff
AG
2768 netdev_err(tx_ring->netdev,
2769 "Detected Tx Unit Hang\n"
2770 " Tx Queue <%d>\n"
2771 " TDH <%x>\n"
2772 " TDT <%x>\n"
2773 " next_to_use <%x>\n"
2774 " next_to_clean <%x>\n"
2775 "buffer_info[next_to_clean]\n"
2776 " time_stamp <%lx>\n"
2777 " next_to_watch <%p>\n"
2778 " jiffies <%lx>\n"
2779 " desc.status <%x>\n",
2780 tx_ring->queue_index,
2781 rd32(IGC_TDH(tx_ring->reg_idx)),
2782 readl(tx_ring->tail),
2783 tx_ring->next_to_use,
2784 tx_ring->next_to_clean,
2785 tx_buffer->time_stamp,
2786 tx_buffer->next_to_watch,
2787 jiffies,
2788 tx_buffer->next_to_watch->wb.status);
bb9089b6
DC
2789 netif_stop_subqueue(tx_ring->netdev,
2790 tx_ring->queue_index);
0507ef8a
SN
2791
2792 /* we are about to reset, no point in enabling stuff */
2793 return true;
2794 }
2795 }
2796
2797#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2798 if (unlikely(total_packets &&
2799 netif_carrier_ok(tx_ring->netdev) &&
2800 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2801 /* Make sure that anybody stopping the queue after this
2802 * sees the new next_to_clean.
2803 */
2804 smp_mb();
2805 if (__netif_subqueue_stopped(tx_ring->netdev,
2806 tx_ring->queue_index) &&
2807 !(test_bit(__IGC_DOWN, &adapter->state))) {
2808 netif_wake_subqueue(tx_ring->netdev,
2809 tx_ring->queue_index);
2810
2811 u64_stats_update_begin(&tx_ring->tx_syncp);
2812 tx_ring->tx_stats.restart_queue++;
2813 u64_stats_update_end(&tx_ring->tx_syncp);
2814 }
2815 }
2816
2817 return !!budget;
2818}
2819
750433d0
AG
2820static int igc_find_mac_filter(struct igc_adapter *adapter,
2821 enum igc_mac_filter_type type, const u8 *addr)
1a7c0f2e 2822{
d66358ca
AG
2823 struct igc_hw *hw = &adapter->hw;
2824 int max_entries = hw->mac.rar_entry_count;
2825 u32 ral, rah;
794e5bc8 2826 int i;
1a7c0f2e 2827
794e5bc8 2828 for (i = 0; i < max_entries; i++) {
d66358ca
AG
2829 ral = rd32(IGC_RAL(i));
2830 rah = rd32(IGC_RAH(i));
1a7c0f2e 2831
d66358ca 2832 if (!(rah & IGC_RAH_AV))
794e5bc8 2833 continue;
750433d0
AG
2834 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2835 continue;
d66358ca
AG
2836 if ((rah & IGC_RAH_RAH_MASK) !=
2837 le16_to_cpup((__le16 *)(addr + 4)))
2838 continue;
2839 if (ral != le32_to_cpup((__le32 *)(addr)))
794e5bc8 2840 continue;
86a4de66 2841
794e5bc8
AG
2842 return i;
2843 }
1a7c0f2e 2844
794e5bc8 2845 return -1;
1a7c0f2e
SN
2846}
2847
794e5bc8 2848static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
86a4de66 2849{
d66358ca
AG
2850 struct igc_hw *hw = &adapter->hw;
2851 int max_entries = hw->mac.rar_entry_count;
2852 u32 rah;
794e5bc8 2853 int i;
86a4de66 2854
794e5bc8 2855 for (i = 0; i < max_entries; i++) {
d66358ca 2856 rah = rd32(IGC_RAH(i));
86a4de66 2857
d66358ca 2858 if (!(rah & IGC_RAH_AV))
794e5bc8
AG
2859 return i;
2860 }
86a4de66 2861
794e5bc8 2862 return -1;
86a4de66
SN
2863}
2864
e9736fa4
AG
2865/**
2866 * igc_add_mac_filter() - Add MAC address filter
2867 * @adapter: Pointer to adapter where the filter should be added
750433d0 2868 * @type: MAC address filter type (source or destination)
e9736fa4
AG
2869 * @addr: MAC address
2870 * @queue: If non-negative, queue assignment feature is enabled and frames
2871 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2872 * assignment is disabled.
e9736fa4
AG
2873 *
2874 * Return: 0 in case of success, negative errno code otherwise.
86a4de66
SN
2875 */
2876static int igc_add_mac_filter(struct igc_adapter *adapter,
36fa2152
AG
2877 enum igc_mac_filter_type type, const u8 *addr,
2878 int queue)
86a4de66 2879{
949b922e 2880 struct net_device *dev = adapter->netdev;
794e5bc8 2881 int index;
86a4de66 2882
750433d0 2883 index = igc_find_mac_filter(adapter, type, addr);
794e5bc8 2884 if (index >= 0)
d66358ca 2885 goto update_filter;
86a4de66 2886
794e5bc8
AG
2887 index = igc_get_avail_mac_filter_slot(adapter);
2888 if (index < 0)
2889 return -ENOSPC;
86a4de66 2890
750433d0
AG
2891 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2892 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2893 addr, queue);
949b922e 2894
d66358ca 2895update_filter:
750433d0 2896 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
794e5bc8 2897 return 0;
86a4de66
SN
2898}
2899
c6aae591
AG
2900/**
2901 * igc_del_mac_filter() - Delete MAC address filter
2902 * @adapter: Pointer to adapter where the filter should be deleted from
750433d0 2903 * @type: MAC address filter type (source or destination)
c6aae591 2904 * @addr: MAC address
86a4de66 2905 */
acda576f
AG
2906static void igc_del_mac_filter(struct igc_adapter *adapter,
2907 enum igc_mac_filter_type type, const u8 *addr)
86a4de66 2908{
949b922e 2909 struct net_device *dev = adapter->netdev;
5f930713 2910 int index;
86a4de66 2911
750433d0 2912 index = igc_find_mac_filter(adapter, type, addr);
5f930713 2913 if (index < 0)
acda576f 2914 return;
86a4de66 2915
d66358ca 2916 if (index == 0) {
5f930713
AG
2917 /* If this is the default filter, we don't actually delete it.
 2918 		 * We just reset it to its default value, i.e. disable queue
2919 * assignment.
2920 */
949b922e
AG
2921 netdev_dbg(dev, "Disable default MAC filter queue assignment");
2922
750433d0 2923 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
5f930713 2924 } else {
750433d0
AG
2925 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2926 index,
2927 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2928 addr);
949b922e 2929
5f930713 2930 igc_clear_mac_filter_hw(adapter, index);
86a4de66 2931 }
86a4de66
SN
2932}
2933
12ddee68
AG
2934/**
2935 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2936 * @adapter: Pointer to adapter where the filter should be added
2937 * @prio: VLAN priority value
2938 * @queue: Queue number which matching frames are assigned to
2939 *
2940 * Return: 0 in case of success, negative errno code otherwise.
2941 */
36fa2152
AG
2942static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2943 int queue)
86a4de66 2944{
12ddee68 2945 struct net_device *dev = adapter->netdev;
86a4de66 2946 struct igc_hw *hw = &adapter->hw;
12ddee68 2947 u32 vlanpqf;
86a4de66 2948
12ddee68 2949 vlanpqf = rd32(IGC_VLANPQF);
86a4de66 2950
12ddee68
AG
2951 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2952 netdev_dbg(dev, "VLAN priority filter already in use\n");
2953 return -EEXIST;
2954 }
86a4de66 2955
12ddee68
AG
2956 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2957 vlanpqf |= IGC_VLANPQF_VALID(prio);
86a4de66 2958
12ddee68
AG
2959 wr32(IGC_VLANPQF, vlanpqf);
2960
2961 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2962 prio, queue);
2963 return 0;
2964}
2965
2966/**
2967 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2968 * @adapter: Pointer to adapter where the filter should be deleted from
2969 * @prio: VLAN priority value
2970 */
36fa2152 2971static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
12ddee68
AG
2972{
2973 struct igc_hw *hw = &adapter->hw;
2974 u32 vlanpqf;
2975
2976 vlanpqf = rd32(IGC_VLANPQF);
2977
2978 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2979 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2980
2981 wr32(IGC_VLANPQF, vlanpqf);
2982
2983 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2984 prio);
2985}
2986
aa7ca726
AG
2987static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2988{
2989 struct igc_hw *hw = &adapter->hw;
2990 int i;
2991
2992 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2993 u32 etqf = rd32(IGC_ETQF(i));
2994
2995 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2996 return i;
86a4de66
SN
2997 }
2998
aa7ca726 2999 return -1;
86a4de66
SN
3000}
3001
aa7ca726
AG
3002/**
3003 * igc_add_etype_filter() - Add ethertype filter
3004 * @adapter: Pointer to adapter where the filter should be added
3005 * @etype: Ethertype value
3006 * @queue: If non-negative, queue assignment feature is enabled and frames
3007 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3008 * assignment is disabled.
3009 *
3010 * Return: 0 in case of success, negative errno code otherwise.
86a4de66 3011 */
36fa2152
AG
3012static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3013 int queue)
aa7ca726
AG
3014{
3015 struct igc_hw *hw = &adapter->hw;
3016 int index;
3017 u32 etqf;
3018
3019 index = igc_get_avail_etype_filter_slot(adapter);
3020 if (index < 0)
3021 return -ENOSPC;
3022
3023 etqf = rd32(IGC_ETQF(index));
3024
3025 etqf &= ~IGC_ETQF_ETYPE_MASK;
3026 etqf |= etype;
3027
3028 if (queue >= 0) {
3029 etqf &= ~IGC_ETQF_QUEUE_MASK;
3030 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3031 etqf |= IGC_ETQF_QUEUE_ENABLE;
3032 }
3033
3034 etqf |= IGC_ETQF_FILTER_ENABLE;
3035
3036 wr32(IGC_ETQF(index), etqf);
3037
3038 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3039 etype, queue);
3040 return 0;
3041}
3042
3043static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
86a4de66
SN
3044{
3045 struct igc_hw *hw = &adapter->hw;
86a4de66
SN
3046 int i;
3047
aa7ca726
AG
3048 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3049 u32 etqf = rd32(IGC_ETQF(i));
86a4de66 3050
aa7ca726
AG
3051 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3052 return i;
3053 }
86a4de66 3054
aa7ca726
AG
3055 return -1;
3056}
86a4de66 3057
aa7ca726
AG
3058/**
3059 * igc_del_etype_filter() - Delete ethertype filter
3060 * @adapter: Pointer to adapter where the filter should be deleted from
3061 * @etype: Ethertype value
aa7ca726 3062 */
acda576f 3063static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
aa7ca726
AG
3064{
3065 struct igc_hw *hw = &adapter->hw;
3066 int index;
3067
3068 index = igc_find_etype_filter(adapter, etype);
3069 if (index < 0)
acda576f 3070 return;
aa7ca726
AG
3071
3072 wr32(IGC_ETQF(index), 0);
3073
3074 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3075 etype);
aa7ca726
AG
3076}
3077
6574631b
KK
3078static int igc_flex_filter_select(struct igc_adapter *adapter,
3079 struct igc_flex_filter *input,
3080 u32 *fhft)
3081{
3082 struct igc_hw *hw = &adapter->hw;
3083 u8 fhft_index;
3084 u32 fhftsl;
3085
3086 if (input->index >= MAX_FLEX_FILTER) {
3087 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
3088 return -EINVAL;
3089 }
3090
3091 /* Indirect table select register */
3092 fhftsl = rd32(IGC_FHFTSL);
3093 fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
3094 switch (input->index) {
3095 case 0 ... 7:
3096 fhftsl |= 0x00;
3097 break;
3098 case 8 ... 15:
3099 fhftsl |= 0x01;
3100 break;
3101 case 16 ... 23:
3102 fhftsl |= 0x02;
3103 break;
3104 case 24 ... 31:
3105 fhftsl |= 0x03;
3106 break;
3107 }
3108 wr32(IGC_FHFTSL, fhftsl);
3109
3110 /* Normalize index down to host table register */
3111 fhft_index = input->index % 8;
3112
3113 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
3114 IGC_FHFT_EXT(fhft_index - 4);
3115
3116 return 0;
3117}
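
/* Illustrative note, not part of the upstream driver: the mapping above
 * worked through for filter index 21: the 16 ... 23 case selects table page
 * 0x02 in FHFTSL, fhft_index = 21 % 8 = 5, and since 5 >= 4 the register
 * base becomes IGC_FHFT_EXT(5 - 4) = IGC_FHFT_EXT(1).
 */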
3118
2b477d05
KK
3119static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3120 struct igc_flex_filter *input)
6574631b
KK
3121{
3122 struct device *dev = &adapter->pdev->dev;
3123 struct igc_hw *hw = &adapter->hw;
3124 u8 *data = input->data;
3125 u8 *mask = input->mask;
3126 u32 queuing;
3127 u32 fhft;
3128 u32 wufc;
3129 int ret;
3130 int i;
3131
3132 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3133 * out early to avoid surprises later.
3134 */
3135 if (input->length % 8 != 0) {
3136 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
3137 return -EINVAL;
3138 }
3139
3140 /* Select corresponding flex filter register and get base for host table. */
3141 ret = igc_flex_filter_select(adapter, input, &fhft);
3142 if (ret)
3143 return ret;
3144
3145 /* When adding a filter globally disable flex filter feature. That is
3146 * recommended within the datasheet.
3147 */
3148 wufc = rd32(IGC_WUFC);
3149 wufc &= ~IGC_WUFC_FLEX_HQ;
3150 wr32(IGC_WUFC, wufc);
3151
3152 /* Configure filter */
3153 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3154 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
3155 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
3156
3157 if (input->immediate_irq)
3158 queuing |= IGC_FHFT_IMM_INT;
3159
3160 if (input->drop)
3161 queuing |= IGC_FHFT_DROP;
3162
3163 wr32(fhft + 0xFC, queuing);
3164
 3165 	/* Write data (128 bytes) and mask (128 bits) */
3166 for (i = 0; i < 16; ++i) {
3167 const size_t data_idx = i * 8;
3168 const size_t row_idx = i * 16;
3169 u32 dw0 =
3170 (data[data_idx + 0] << 0) |
3171 (data[data_idx + 1] << 8) |
3172 (data[data_idx + 2] << 16) |
3173 (data[data_idx + 3] << 24);
3174 u32 dw1 =
3175 (data[data_idx + 4] << 0) |
3176 (data[data_idx + 5] << 8) |
3177 (data[data_idx + 6] << 16) |
3178 (data[data_idx + 7] << 24);
3179 u32 tmp;
3180
3181 /* Write row: dw0, dw1 and mask */
3182 wr32(fhft + row_idx, dw0);
3183 wr32(fhft + row_idx + 4, dw1);
3184
3185 /* mask is only valid for MASK(7, 0) */
3186 tmp = rd32(fhft + row_idx + 8);
3187 tmp &= ~GENMASK(7, 0);
3188 tmp |= mask[i];
3189 wr32(fhft + row_idx + 8, tmp);
3190 }
3191
3192 /* Enable filter. */
3193 wufc |= IGC_WUFC_FLEX_HQ;
 3194 	if (input->index >= 8) {
 3195 		/* Filters 0-7 are enabled via WUFC; filters 8-31 via WUFC_EXT. */
3196 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3197
3198 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
3199
3200 wr32(IGC_WUFC_EXT, wufc_ext);
3201 } else {
3202 wufc |= (IGC_WUFC_FLX0 << input->index);
3203 }
3204 wr32(IGC_WUFC, wufc);
3205
3206 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
3207 input->index);
3208
3209 return 0;
3210}
3211
2b477d05
KK
3212static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3213 const void *src, unsigned int offset,
3214 size_t len, const void *mask)
3215{
3216 int i;
3217
3218 /* data */
3219 memcpy(&flex->data[offset], src, len);
3220
3221 /* mask */
3222 for (i = 0; i < len; ++i) {
3223 const unsigned int idx = i + offset;
3224 const u8 *ptr = mask;
3225
3226 if (mask) {
3227 if (ptr[i] & 0xff)
3228 flex->mask[idx / 8] |= BIT(idx % 8);
3229
3230 continue;
3231 }
3232
3233 flex->mask[idx / 8] |= BIT(idx % 8);
3234 }
3235}
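
/* Illustrative sketch only, not part of the upstream driver: how the helper
 * above fills data and mask. The function name and the PTP EtherType are
 * just examples; the real callers follow in igc_add_flex_filter().
 */
static inline void igc_example_flex_match_ptp(struct igc_flex_filter *flex)
{
	__be16 etype = cpu_to_be16(ETH_P_1588);

	/* Place the EtherType at offset 12 (untagged frame layout). A NULL
	 * mask marks every byte of the field as "must match", so bits 12 and
	 * 13 of the 128-bit mask get set and data[12..13] holds the
	 * big-endian EtherType.
	 */
	igc_flex_filter_add_field(flex, &etype, 12, sizeof(etype), NULL);
}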
3236
3237static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3238{
3239 struct igc_hw *hw = &adapter->hw;
3240 u32 wufc, wufc_ext;
3241 int i;
3242
3243 wufc = rd32(IGC_WUFC);
3244 wufc_ext = rd32(IGC_WUFC_EXT);
3245
3246 for (i = 0; i < MAX_FLEX_FILTER; i++) {
3247 if (i < 8) {
3248 if (!(wufc & (IGC_WUFC_FLX0 << i)))
3249 return i;
3250 } else {
3251 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
3252 return i;
3253 }
3254 }
3255
3256 return -ENOSPC;
3257}
3258
3259static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3260{
3261 struct igc_hw *hw = &adapter->hw;
3262 u32 wufc, wufc_ext;
3263
3264 wufc = rd32(IGC_WUFC);
3265 wufc_ext = rd32(IGC_WUFC_EXT);
3266
3267 if (wufc & IGC_WUFC_FILTER_MASK)
3268 return true;
3269
3270 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
3271 return true;
3272
3273 return false;
3274}
3275
3276static int igc_add_flex_filter(struct igc_adapter *adapter,
3277 struct igc_nfc_rule *rule)
3278{
3279 struct igc_flex_filter flex = { };
3280 struct igc_nfc_filter *filter = &rule->filter;
3281 unsigned int eth_offset, user_offset;
3282 int ret, index;
3283 bool vlan;
3284
3285 index = igc_find_avail_flex_filter_slot(adapter);
3286 if (index < 0)
3287 return -ENOSPC;
3288
3289 /* Construct the flex filter:
3290 * -> dest_mac [6]
3291 * -> src_mac [6]
3292 * -> tpid [2]
3293 * -> vlan tci [2]
3294 * -> ether type [2]
3295 * -> user data [8]
3296 * -> = 26 bytes => 32 length
3297 */
3298 flex.index = index;
3299 flex.length = 32;
3300 flex.rx_queue = rule->action;
3301
3302 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3303 eth_offset = vlan ? 16 : 12;
3304 user_offset = vlan ? 18 : 14;
3305
3306 /* Add destination MAC */
3307 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3308 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
3309 ETH_ALEN, NULL);
3310
3311 /* Add source MAC */
3312 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3313 igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
3314 ETH_ALEN, NULL);
3315
3316 /* Add VLAN etype */
3317 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
3318 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
3319 sizeof(filter->vlan_etype),
3320 NULL);
3321
3322 /* Add VLAN TCI */
3323 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3324 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3325 sizeof(filter->vlan_tci), NULL);
3326
3327 /* Add Ether type */
3328 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3329 __be16 etype = cpu_to_be16(filter->etype);
3330
3331 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3332 sizeof(etype), NULL);
3333 }
3334
3335 /* Add user data */
3336 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3337 igc_flex_filter_add_field(&flex, &filter->user_data,
3338 user_offset,
3339 sizeof(filter->user_data),
3340 filter->user_mask);
3341
3342 /* Add it down to the hardware and enable it. */
3343 ret = igc_write_flex_filter_ll(adapter, &flex);
3344 if (ret)
3345 return ret;
3346
3347 filter->flex_index = index;
3348
3349 return 0;
3350}
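
/* Illustrative note, not part of the upstream driver: the "26 bytes => 32
 * length" in the layout comment above follows from the 8-byte alignment
 * enforced by igc_write_flex_filter_ll(): 6 + 6 + 2 + 2 + 2 + 8 matchable
 * bytes round up to 32. For an untagged rule the EtherType and user data
 * simply shift down to offsets 12 and 14 while the filter length stays 32.
 */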
3351
3352static void igc_del_flex_filter(struct igc_adapter *adapter,
3353 u16 reg_index)
3354{
3355 struct igc_hw *hw = &adapter->hw;
3356 u32 wufc;
3357
 3358 	/* Just disable the filter; the filter table itself is kept
 3359 	 * intact. A subsequent igc_add_flex_filter() will simply overwrite
 3360 	 * the stale data.
3361 */
 3362 	if (reg_index >= 8) {
3363 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3364
3365 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
3366 wr32(IGC_WUFC_EXT, wufc_ext);
3367 } else {
3368 wufc = rd32(IGC_WUFC);
3369
3370 wufc &= ~(IGC_WUFC_FLX0 << reg_index);
3371 wr32(IGC_WUFC, wufc);
3372 }
3373
3374 if (igc_flex_filter_in_use(adapter))
3375 return;
3376
3377 /* No filters are in use, we may disable flex filters */
3378 wufc = rd32(IGC_WUFC);
3379 wufc &= ~IGC_WUFC_FLEX_HQ;
3380 wr32(IGC_WUFC, wufc);
3381}
3382
36fa2152 3383static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2b477d05 3384 struct igc_nfc_rule *rule)
36fa2152
AG
3385{
3386 int err;
3387
73744262
KK
3388 if (rule->flex) {
3389 return igc_add_flex_filter(adapter, rule);
2b477d05
KK
3390 }
3391
36fa2152
AG
3392 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3393 err = igc_add_etype_filter(adapter, rule->filter.etype,
3394 rule->action);
3395 if (err)
3396 return err;
3397 }
3398
3399 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3400 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3401 rule->filter.src_addr, rule->action);
3402 if (err)
3403 return err;
3404 }
3405
3406 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3407 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3408 rule->filter.dst_addr, rule->action);
3409 if (err)
3410 return err;
3411 }
3412
3413 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3414 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3415 VLAN_PRIO_SHIFT;
3416
3417 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3418 if (err)
3419 return err;
3420 }
3421
3422 return 0;
3423}
3424
acda576f
AG
3425static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3426 const struct igc_nfc_rule *rule)
36fa2152 3427{
73744262 3428 if (rule->flex) {
2b477d05 3429 igc_del_flex_filter(adapter, rule->filter.flex_index);
73744262
KK
3430 return;
3431 }
2b477d05 3432
36fa2152
AG
3433 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3434 igc_del_etype_filter(adapter, rule->filter.etype);
3435
3436 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3437 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3438 VLAN_PRIO_SHIFT;
3439
3440 igc_del_vlan_prio_filter(adapter, prio);
3441 }
3442
3443 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3444 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3445 rule->filter.src_addr);
3446
3447 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3448 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3449 rule->filter.dst_addr);
36fa2152
AG
3450}
3451
3452/**
3453 * igc_get_nfc_rule() - Get NFC rule
3454 * @adapter: Pointer to adapter
3455 * @location: Rule location
3456 *
3457 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3458 *
3459 * Return: Pointer to NFC rule at @location. If not found, NULL.
3460 */
3461struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3462 u32 location)
3463{
3464 struct igc_nfc_rule *rule;
3465
3466 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3467 if (rule->location == location)
3468 return rule;
3469 if (rule->location > location)
3470 break;
3471 }
3472
3473 return NULL;
3474}
3475
3476/**
3477 * igc_del_nfc_rule() - Delete NFC rule
3478 * @adapter: Pointer to adapter
3479 * @rule: Pointer to rule to be deleted
3480 *
3481 * Disable NFC rule in hardware and delete it from adapter.
3482 *
3483 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3484 */
3485void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3486{
3487 igc_disable_nfc_rule(adapter, rule);
3488
3489 list_del(&rule->list);
3490 adapter->nfc_rule_count--;
3491
3492 kfree(rule);
3493}
3494
e256ec83
AG
3495static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3496{
3497 struct igc_nfc_rule *rule, *tmp;
3498
42fc5dc0 3499 mutex_lock(&adapter->nfc_rule_lock);
e256ec83
AG
3500
3501 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3502 igc_del_nfc_rule(adapter, rule);
3503
42fc5dc0 3504 mutex_unlock(&adapter->nfc_rule_lock);
e256ec83
AG
3505}
3506
36fa2152
AG
3507/**
3508 * igc_add_nfc_rule() - Add NFC rule
3509 * @adapter: Pointer to adapter
3510 * @rule: Pointer to rule to be added
3511 *
3512 * Enable NFC rule in hardware and add it to adapter.
3513 *
3514 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3515 *
3516 * Return: 0 on success, negative errno on failure.
3517 */
3518int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3519{
3520 struct igc_nfc_rule *pred, *cur;
3521 int err;
3522
3523 err = igc_enable_nfc_rule(adapter, rule);
3524 if (err)
3525 return err;
3526
3527 pred = NULL;
3528 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3529 if (cur->location >= rule->location)
3530 break;
3531 pred = cur;
86a4de66
SN
3532 }
3533
36fa2152
AG
3534 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3535 adapter->nfc_rule_count++;
3536 return 0;
3537}
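
/* Illustrative sketch only, not part of the upstream driver: the locking
 * contract documented above, seen from the caller's side. The function name
 * is hypothetical and the rule is assumed to have been allocated and filled
 * in by the caller (the real callers live in the driver's ethtool code).
 */
static inline int igc_example_install_nfc_rule(struct igc_adapter *adapter,
					       struct igc_nfc_rule *rule)
{
	int err;

	mutex_lock(&adapter->nfc_rule_lock);
	err = igc_add_nfc_rule(adapter, rule);
	mutex_unlock(&adapter->nfc_rule_lock);

	return err;
}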
3538
3539static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3540{
3541 struct igc_nfc_rule *rule;
3542
42fc5dc0 3543 mutex_lock(&adapter->nfc_rule_lock);
36fa2152
AG
3544
3545 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3546 igc_enable_nfc_rule(adapter, rule);
3547
42fc5dc0 3548 mutex_unlock(&adapter->nfc_rule_lock);
86a4de66
SN
3549}
3550
3551static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3552{
3553 struct igc_adapter *adapter = netdev_priv(netdev);
86a4de66 3554
750433d0 3555 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
86a4de66
SN
3556}
3557
3558static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3559{
3560 struct igc_adapter *adapter = netdev_priv(netdev);
3561
acda576f 3562 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
86a4de66
SN
3563 return 0;
3564}
3565
3566/**
3567 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3568 * @netdev: network interface device structure
3569 *
3570 * The set_rx_mode entry point is called whenever the unicast or multicast
3571 * address lists or the network interface flags are updated. This routine is
3572 * responsible for configuring the hardware for proper unicast, multicast,
3573 * promiscuous mode, and all-multi behavior.
3574 */
3575static void igc_set_rx_mode(struct net_device *netdev)
3576{
3577 struct igc_adapter *adapter = netdev_priv(netdev);
3578 struct igc_hw *hw = &adapter->hw;
3579 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3580 int count;
3581
3582 /* Check for Promiscuous and All Multicast modes */
3583 if (netdev->flags & IFF_PROMISC) {
3584 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3585 } else {
3586 if (netdev->flags & IFF_ALLMULTI) {
3587 rctl |= IGC_RCTL_MPE;
3588 } else {
 3589 			/* Write addresses to the MTA; if the attempt fails
 3590 			 * then we should just turn on multicast promiscuous mode so
 3591 			 * that we can at least receive multicast traffic
3592 */
3593 count = igc_write_mc_addr_list(netdev);
3594 if (count < 0)
3595 rctl |= IGC_RCTL_MPE;
3596 }
3597 }
3598
 3599 	/* Write addresses to available RAR registers; if there is not
3600 * sufficient space to store all the addresses then enable
3601 * unicast promiscuous mode
3602 */
3603 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3604 rctl |= IGC_RCTL_UPE;
3605
3606 /* update state of unicast and multicast */
3607 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3608 wr32(IGC_RCTL, rctl);
3609
3610#if (PAGE_SIZE < 8192)
3611 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3612 rlpml = IGC_MAX_FRAME_BUILD_SKB;
3613#endif
3614 wr32(IGC_RLPML, rlpml);
3615}
3616
1a7c0f2e
SN
3617/**
3618 * igc_configure - configure the hardware for RX and TX
3619 * @adapter: private board structure
3620 */
3621static void igc_configure(struct igc_adapter *adapter)
3622{
3623 struct net_device *netdev = adapter->netdev;
3624 int i = 0;
3625
3626 igc_get_hw_control(adapter);
3627 igc_set_rx_mode(netdev);
3628
8d744963
MHZ
3629 igc_restore_vlan(adapter);
3630
1a7c0f2e
SN
3631 igc_setup_tctl(adapter);
3632 igc_setup_mrqc(adapter);
3633 igc_setup_rctl(adapter);
3634
ac9156b2 3635 igc_set_default_mac_filter(adapter);
97700bc8 3636 igc_restore_nfc_rules(adapter);
ac9156b2 3637
1a7c0f2e
SN
3638 igc_configure_tx(adapter);
3639 igc_configure_rx(adapter);
3640
3641 igc_rx_fifo_flush_base(&adapter->hw);
3642
3643 /* call igc_desc_unused which always leaves
3644 * at least 1 descriptor unused to make sure
3645 * next_to_use != next_to_clean
3646 */
3647 for (i = 0; i < adapter->num_rx_queues; i++) {
3648 struct igc_ring *ring = adapter->rx_ring[i];
3649
fc9df2a0
AG
3650 if (ring->xsk_pool)
3651 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3652 else
3653 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
1a7c0f2e
SN
3654 }
3655}
3656
f817fa05
SN
3657/**
3658 * igc_write_ivar - configure ivar for given MSI-X vector
3659 * @hw: pointer to the HW structure
3660 * @msix_vector: vector number we are allocating to a given ring
3661 * @index: row index of IVAR register to write within IVAR table
3662 * @offset: column offset of in IVAR, should be multiple of 8
3663 *
3664 * The IVAR table consists of 2 columns,
 3665 * each containing a cause allocation for an Rx and Tx ring, and a
3666 * variable number of rows depending on the number of queues supported.
3667 */
3668static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3669 int index, int offset)
3670{
3671 u32 ivar = array_rd32(IGC_IVAR0, index);
3672
3673 /* clear any bits that are currently set */
3674 ivar &= ~((u32)0xFF << offset);
3675
3676 /* write vector and valid bit */
3677 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3678
3679 array_wr32(IGC_IVAR0, index, ivar);
3680}
3681
3682static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3683{
3684 struct igc_adapter *adapter = q_vector->adapter;
3685 struct igc_hw *hw = &adapter->hw;
3686 int rx_queue = IGC_N0_QUEUE;
3687 int tx_queue = IGC_N0_QUEUE;
3688
3689 if (q_vector->rx.ring)
3690 rx_queue = q_vector->rx.ring->reg_idx;
3691 if (q_vector->tx.ring)
3692 tx_queue = q_vector->tx.ring->reg_idx;
3693
3694 switch (hw->mac.type) {
3695 case igc_i225:
3696 if (rx_queue > IGC_N0_QUEUE)
3697 igc_write_ivar(hw, msix_vector,
3698 rx_queue >> 1,
3699 (rx_queue & 0x1) << 4);
3700 if (tx_queue > IGC_N0_QUEUE)
3701 igc_write_ivar(hw, msix_vector,
3702 tx_queue >> 1,
3703 ((tx_queue & 0x1) << 4) + 8);
3704 q_vector->eims_value = BIT(msix_vector);
3705 break;
3706 default:
3707 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
3708 break;
3709 }
3710
3711 /* add q_vector eims value to global eims_enable_mask */
3712 adapter->eims_enable_mask |= q_vector->eims_value;
3713
3714 /* configure q_vector to set itr on first interrupt */
3715 q_vector->set_itr = 1;
3716}
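
/* Illustrative note, not part of the upstream driver: the IVAR math above
 * worked through for MSI-X vector 3 handling Rx and Tx queue 5. Both causes
 * use row index 5 >> 1 = 2; the Rx cause uses column offset
 * (5 & 1) << 4 = 16 and the Tx cause 16 + 8 = 24, so vector 3 (OR'ed with
 * IGC_IVAR_VALID) lands in bits 23:16 and 31:24 of IVAR0[2].
 */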
3717
a146ea02
SN
3718/**
3719 * igc_configure_msix - Configure MSI-X hardware
3720 * @adapter: Pointer to adapter structure
3721 *
3722 * igc_configure_msix sets up the hardware to properly
3723 * generate MSI-X interrupts.
3724 */
3725static void igc_configure_msix(struct igc_adapter *adapter)
3726{
3727 struct igc_hw *hw = &adapter->hw;
3728 int i, vector = 0;
3729 u32 tmp;
3730
3731 adapter->eims_enable_mask = 0;
3732
3733 /* set vector for other causes, i.e. link changes */
3734 switch (hw->mac.type) {
3735 case igc_i225:
3736 /* Turn on MSI-X capability first, or our settings
3737 * won't stick. And it will take days to debug.
3738 */
3739 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3740 IGC_GPIE_PBA | IGC_GPIE_EIAME |
3741 IGC_GPIE_NSICR);
3742
3743 /* enable msix_other interrupt */
3744 adapter->eims_other = BIT(vector);
3745 tmp = (vector++ | IGC_IVAR_VALID) << 8;
3746
3747 wr32(IGC_IVAR_MISC, tmp);
3748 break;
3749 default:
3750 /* do nothing, since nothing else supports MSI-X */
3751 break;
3752 } /* switch (hw->mac.type) */
3753
3754 adapter->eims_enable_mask |= adapter->eims_other;
3755
3756 for (i = 0; i < adapter->num_q_vectors; i++)
3757 igc_assign_vector(adapter->q_vector[i], vector++);
3758
3759 wrfl();
3760}
3761
fccf939e
SN
3762/**
3763 * igc_irq_enable - Enable default interrupt generation settings
3764 * @adapter: board private structure
3765 */
3766static void igc_irq_enable(struct igc_adapter *adapter)
3767{
3768 struct igc_hw *hw = &adapter->hw;
3769
3770 if (adapter->msix_entries) {
3771 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3772 u32 regval = rd32(IGC_EIAC);
3773
3774 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3775 regval = rd32(IGC_EIAM);
3776 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3777 wr32(IGC_EIMS, adapter->eims_enable_mask);
3778 wr32(IGC_IMS, ims);
3779 } else {
3780 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3781 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3782 }
3783}
3784
35f9a78a
SN
3785/**
3786 * igc_irq_disable - Mask off interrupt generation on the NIC
3787 * @adapter: board private structure
3788 */
3789static void igc_irq_disable(struct igc_adapter *adapter)
3790{
3791 struct igc_hw *hw = &adapter->hw;
3792
3793 if (adapter->msix_entries) {
3794 u32 regval = rd32(IGC_EIAM);
3795
3796 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
3797 wr32(IGC_EIMC, adapter->eims_enable_mask);
3798 regval = rd32(IGC_EIAC);
3799 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
3800 }
3801
3802 wr32(IGC_IAM, 0);
3803 wr32(IGC_IMC, ~0);
3804 wrfl();
3805
3806 if (adapter->msix_entries) {
3807 int vector = 0, i;
3808
3809 synchronize_irq(adapter->msix_entries[vector++].vector);
3810
3811 for (i = 0; i < adapter->num_q_vectors; i++)
3812 synchronize_irq(adapter->msix_entries[vector++].vector);
3813 } else {
3814 synchronize_irq(adapter->pdev->irq);
3815 }
3816}
3817
63c92c9d
SN
3818void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
3819 const u32 max_rss_queues)
3820{
3821 /* Determine if we need to pair queues. */
3822 /* If rss_queues > half of max_rss_queues, pair the queues in
3823 * order to conserve interrupts due to limited supply.
3824 */
3825 if (adapter->rss_queues > (max_rss_queues / 2))
3826 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3827 else
3828 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
3829}
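
/* Editor's illustrative sketch (not part of the driver): the pairing decision
 * made by igc_set_flag_queue_pairs() above.  With IGC_MAX_RX_QUEUES == 4,
 * asking for 1 or 2 RSS queues leaves enough MSI-X vectors for separate Tx
 * and Rx handlers, while 3 or 4 queues forces each vector to serve a Tx/Rx
 * pair.  example_wants_queue_pairs() is a hypothetical restatement.
 */
#if 0
static bool example_wants_queue_pairs(u32 rss_queues, u32 max_rss_queues)
{
	/* pair Tx and Rx on one vector once more than half the queues are used */
	return rss_queues > (max_rss_queues / 2);
	/* example_wants_queue_pairs(2, 4) == false, (3, 4) == true */
}
#endif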
3830
3831unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
3832{
4d0710c2 3833 return IGC_MAX_RX_QUEUES;
63c92c9d
SN
3834}
3835
3836static void igc_init_queue_configuration(struct igc_adapter *adapter)
3837{
3838 u32 max_rss_queues;
3839
3840 max_rss_queues = igc_get_max_rss_queues(adapter);
3841 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3842
3843 igc_set_flag_queue_pairs(adapter, max_rss_queues);
3844}
3845
c9a11c23 3846/**
63c92c9d
SN
3847 * igc_reset_q_vector - Reset config for interrupt vector
3848 * @adapter: board private structure to initialize
3849 * @v_idx: Index of vector to be reset
3850 *
3851 * If NAPI is enabled it will delete any references to the
3852 * NAPI struct. This is preparation for igc_free_q_vector.
c9a11c23 3853 */
63c92c9d 3854static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
c9a11c23 3855{
63c92c9d 3856 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
c9a11c23 3857
63c92c9d
SN
3858 /* if we're coming from igc_set_interrupt_capability, the vectors are
3859 * not yet allocated
3860 */
3861 if (!q_vector)
3862 return;
c9a11c23 3863
63c92c9d
SN
3864 if (q_vector->tx.ring)
3865 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
c9a11c23 3866
63c92c9d
SN
3867 if (q_vector->rx.ring)
3868 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
3df25e4c 3869
63c92c9d
SN
3870 netif_napi_del(&q_vector->napi);
3871}
3df25e4c 3872
63c92c9d
SN
3873/**
3874 * igc_free_q_vector - Free memory allocated for specific interrupt vector
3875 * @adapter: board private structure to initialize
3876 * @v_idx: Index of vector to be freed
3877 *
3878 * This function frees the memory allocated to the q_vector.
3879 */
3880static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
3881{
3882 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
13b5b7fd 3883
63c92c9d 3884 adapter->q_vector[v_idx] = NULL;
13b5b7fd 3885
63c92c9d
SN
3886 /* igc_get_stats64() might access the rings on this vector,
3887 * we must wait a grace period before freeing it.
3888 */
3889 if (q_vector)
3890 kfree_rcu(q_vector, rcu);
c9a11c23
SN
3891}
3892
3893/**
63c92c9d
SN
3894 * igc_free_q_vectors - Free memory allocated for interrupt vectors
3895 * @adapter: board private structure to initialize
3896 *
3897 * This function frees the memory allocated to the q_vectors. In addition if
3898 * NAPI is enabled it will delete any references to the NAPI struct prior
3899 * to freeing the q_vector.
c9a11c23 3900 */
63c92c9d 3901static void igc_free_q_vectors(struct igc_adapter *adapter)
c9a11c23 3902{
63c92c9d 3903 int v_idx = adapter->num_q_vectors;
36b9fea6 3904
63c92c9d
SN
3905 adapter->num_tx_queues = 0;
3906 adapter->num_rx_queues = 0;
3907 adapter->num_q_vectors = 0;
36b9fea6 3908
63c92c9d
SN
3909 while (v_idx--) {
3910 igc_reset_q_vector(adapter, v_idx);
3911 igc_free_q_vector(adapter, v_idx);
3912 }
3913}
36b9fea6 3914
63c92c9d
SN
3915/**
3916 * igc_update_itr - update the dynamic ITR value based on statistics
3917 * @q_vector: pointer to q_vector
3918 * @ring_container: ring info to update the itr for
3919 *
3920 * Stores a new ITR value based on packets and byte
3921 * counts during the last interrupt. The advantage of per interrupt
3922 * computation is faster updates and more accurate ITR for the current
3923 * traffic pattern. Constants in this function were computed
3924 * based on theoretical maximum wire speed and thresholds were set based
3925 * on testing data as well as attempting to minimize response time
3926 * while increasing bulk throughput.
3927 * NOTE: These calculations are only valid when operating in a single-
3928 * queue environment.
3929 */
3930static void igc_update_itr(struct igc_q_vector *q_vector,
3931 struct igc_ring_container *ring_container)
3932{
3933 unsigned int packets = ring_container->total_packets;
3934 unsigned int bytes = ring_container->total_bytes;
3935 u8 itrval = ring_container->itr;
36b9fea6 3936
63c92c9d
SN
3937 /* no packets, exit with status unchanged */
3938 if (packets == 0)
3939 return;
36b9fea6 3940
63c92c9d
SN
3941 switch (itrval) {
3942 case lowest_latency:
3943 /* handle TSO and jumbo frames */
3944 if (bytes / packets > 8000)
3945 itrval = bulk_latency;
3946 else if ((packets < 5) && (bytes > 512))
3947 itrval = low_latency;
3948 break;
3949 case low_latency: /* 50 usec aka 20000 ints/s */
3950 if (bytes > 10000) {
3951 /* this if handles the TSO accounting */
3952 if (bytes / packets > 8000)
3953 itrval = bulk_latency;
3954 else if ((packets < 10) || ((bytes / packets) > 1200))
3955 itrval = bulk_latency;
3956 else if ((packets > 35))
3957 itrval = lowest_latency;
3958 } else if (bytes / packets > 2000) {
3959 itrval = bulk_latency;
3960 } else if (packets <= 2 && bytes < 512) {
3961 itrval = lowest_latency;
36b9fea6 3962 }
63c92c9d
SN
3963 break;
3964 case bulk_latency: /* 250 usec aka 4000 ints/s */
3965 if (bytes > 25000) {
3966 if (packets > 35)
3967 itrval = low_latency;
3968 } else if (bytes < 1500) {
3969 itrval = low_latency;
3970 }
3971 break;
36b9fea6
SN
3972 }
3973
63c92c9d
SN
3974 /* clear work counters since we have the values we need */
3975 ring_container->total_bytes = 0;
3976 ring_container->total_packets = 0;
36b9fea6 3977
63c92c9d
SN
3978 /* write updated itr to ring container */
3979 ring_container->itr = itrval;
3980}
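
/* Editor's illustrative sketch (not part of the driver): a restatement of the
 * heuristic in igc_update_itr() above for a ring that was previously in
 * low_latency.  Packet count and average frame size since the last interrupt
 * drive the transition.  example_next_latency() is hypothetical, not a driver
 * function.
 */
#if 0
static enum latency_range example_next_latency(unsigned int packets,
					       unsigned int bytes)
{
	if (!packets)
		return low_latency;		/* unchanged: no traffic seen */
	if (bytes > 10000) {
		if (bytes / packets > 8000)	/* TSO/jumbo sized frames */
			return bulk_latency;
		if (packets < 10 || bytes / packets > 1200)
			return bulk_latency;
		if (packets > 35)
			return lowest_latency;
		return low_latency;
	}
	if (bytes / packets > 2000)
		return bulk_latency;
	if (packets <= 2 && bytes < 512)
		return lowest_latency;
	return low_latency;
	/* e.g. 40 small packets totalling 12000 bytes -> lowest_latency */
}
#endif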
36b9fea6 3981
63c92c9d
SN
3982static void igc_set_itr(struct igc_q_vector *q_vector)
3983{
3984 struct igc_adapter *adapter = q_vector->adapter;
3985 u32 new_itr = q_vector->itr_val;
3986 u8 current_itr = 0;
3987
3988 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3989 switch (adapter->link_speed) {
3990 case SPEED_10:
3991 case SPEED_100:
3992 current_itr = 0;
3993 new_itr = IGC_4K_ITR;
3994 goto set_itr_now;
3995 default:
3996 break;
36b9fea6 3997 }
36b9fea6 3998
63c92c9d
SN
3999 igc_update_itr(q_vector, &q_vector->tx);
4000 igc_update_itr(q_vector, &q_vector->rx);
36b9fea6 4001
63c92c9d 4002 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
36b9fea6 4003
63c92c9d
SN
4004 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4005 if (current_itr == lowest_latency &&
4006 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4007 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4008 current_itr = low_latency;
36b9fea6 4009
63c92c9d
SN
4010 switch (current_itr) {
4011 /* counts and packets in update_itr are dependent on these numbers */
4012 case lowest_latency:
4013 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
4014 break;
4015 case low_latency:
4016 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
4017 break;
4018 case bulk_latency:
4019 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
4020 break;
4021 default:
4022 break;
4023 }
36b9fea6 4024
63c92c9d
SN
4025set_itr_now:
4026 if (new_itr != q_vector->itr_val) {
4027 /* this attempts to bias the interrupt rate towards Bulk
4028 * by adding intermediate steps when interrupt rate is
4029 * increasing
4030 */
4031 new_itr = new_itr > q_vector->itr_val ?
4032 max((new_itr * q_vector->itr_val) /
4033 (new_itr + (q_vector->itr_val >> 2)),
4034 new_itr) : new_itr;
4035 /* Don't write the value here; it resets the adapter's
4036 * internal timer, and causes us to delay far longer than
4037 * we should between interrupts. Instead, we write the ITR
4038 * value at the beginning of the next interrupt so the timing
4039 * ends up being correct.
4040 */
4041 q_vector->itr_val = new_itr;
4042 q_vector->set_itr = 1;
4043 }
c9a11c23
SN
4044}
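
/* Editor's illustrative sketch (not part of the driver): the target interrupt
 * rates igc_set_itr() selects from the combined Rx/Tx latency range, assuming
 * a gigabit or faster link (10/100 Mbps links are pinned to IGC_4K_ITR above).
 *
 *   lowest_latency -> IGC_70K_ITR  (~70,000 ints/s)
 *   low_latency    -> IGC_20K_ITR  (~20,000 ints/s)
 *   bulk_latency   -> IGC_4K_ITR   (~4,000 ints/s)
 *
 * example_itr_for() is a hypothetical helper restating that table.
 */
#if 0
static u32 example_itr_for(enum latency_range range)
{
	switch (range) {
	case lowest_latency:
		return IGC_70K_ITR;
	case low_latency:
		return IGC_20K_ITR;
	case bulk_latency:
	default:
		return IGC_4K_ITR;
	}
}
#endif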
4045
63c92c9d 4046static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
0507ef8a 4047{
63c92c9d 4048 int v_idx = adapter->num_q_vectors;
6245c848 4049
63c92c9d
SN
4050 if (adapter->msix_entries) {
4051 pci_disable_msix(adapter->pdev);
4052 kfree(adapter->msix_entries);
4053 adapter->msix_entries = NULL;
4054 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4055 pci_disable_msi(adapter->pdev);
4056 }
6245c848 4057
63c92c9d
SN
4058 while (v_idx--)
4059 igc_reset_q_vector(adapter, v_idx);
6245c848
SN
4060}
4061
c9a11c23 4062/**
63c92c9d
SN
4063 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4064 * @adapter: Pointer to adapter structure
4065 * @msix: boolean value for MSI-X capability
4066 *
4067 * Attempt to configure interrupts using the best available
4068 * capabilities of the hardware and kernel.
c9a11c23 4069 */
63c92c9d
SN
4070static void igc_set_interrupt_capability(struct igc_adapter *adapter,
4071 bool msix)
c9a11c23 4072{
63c92c9d
SN
4073 int numvecs, i;
4074 int err;
c9a11c23 4075
63c92c9d
SN
4076 if (!msix)
4077 goto msi_only;
4078 adapter->flags |= IGC_FLAG_HAS_MSIX;
c9a11c23 4079
63c92c9d
SN
4080 /* Number of supported queues. */
4081 adapter->num_rx_queues = adapter->rss_queues;
0507ef8a 4082
63c92c9d 4083 adapter->num_tx_queues = adapter->rss_queues;
0507ef8a 4084
63c92c9d
SN
4085 /* start with one vector for every Rx queue */
4086 numvecs = adapter->num_rx_queues;
0507ef8a 4087
63c92c9d
SN
4088 /* if Tx handler is separate add 1 for every Tx queue */
4089 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4090 numvecs += adapter->num_tx_queues;
0507ef8a 4091
63c92c9d
SN
4092 /* store the number of vectors reserved for queues */
4093 adapter->num_q_vectors = numvecs;
0507ef8a 4094
63c92c9d
SN
4095 /* add 1 vector for link status interrupts */
4096 numvecs++;
c9a11c23 4097
63c92c9d
SN
4098 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
4099 GFP_KERNEL);
0507ef8a 4100
63c92c9d
SN
4101 if (!adapter->msix_entries)
4102 return;
0507ef8a 4103
63c92c9d
SN
4104 /* populate entry values */
4105 for (i = 0; i < numvecs; i++)
4106 adapter->msix_entries[i].entry = i;
0507ef8a 4107
63c92c9d
SN
4108 err = pci_enable_msix_range(adapter->pdev,
4109 adapter->msix_entries,
4110 numvecs,
4111 numvecs);
4112 if (err > 0)
4113 return;
0507ef8a 4114
63c92c9d
SN
4115 kfree(adapter->msix_entries);
4116 adapter->msix_entries = NULL;
0507ef8a 4117
63c92c9d 4118 igc_reset_interrupt_capability(adapter);
0507ef8a 4119
63c92c9d
SN
4120msi_only:
4121 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
0507ef8a 4122
63c92c9d
SN
4123 adapter->rss_queues = 1;
4124 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4125 adapter->num_rx_queues = 1;
4126 adapter->num_tx_queues = 1;
4127 adapter->num_q_vectors = 1;
4128 if (!pci_enable_msi(adapter->pdev))
4129 adapter->flags |= IGC_FLAG_HAS_MSI;
c9a11c23
SN
4130}
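
/* Editor's illustrative sketch (not part of the driver): the vector budget
 * igc_set_interrupt_capability() requests.  With 4 RSS queues and queue
 * pairing disabled it asks for 4 Rx + 4 Tx + 1 "other" = 9 MSI-X vectors;
 * with pairing enabled each vector serves one Rx/Tx pair, so 4 + 1 = 5.
 * example_msix_vectors() is a hypothetical restatement of that arithmetic.
 */
#if 0
static int example_msix_vectors(unsigned int rss_queues, bool queue_pairs)
{
	int numvecs = rss_queues;	/* one vector per Rx queue */

	if (!queue_pairs)
		numvecs += rss_queues;	/* separate Tx vectors */

	return numvecs + 1;		/* plus the link/other vector */
}
#endif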
4131
4132/**
63c92c9d
SN
4133 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4134 * @q_vector: pointer to q_vector
c9a11c23 4135 *
63c92c9d
SN
4136 * Stores a new ITR value based strictly on packet size. This
4137 * algorithm is less sophisticated than that used in igc_update_itr,
4138 * due to the difficulty of synchronizing statistics across multiple
4139 * receive rings. The divisors and thresholds used by this function
4140 * were determined based on theoretical maximum wire speed and testing
4141 * data, in order to minimize response time while increasing bulk
4142 * throughput.
4143 * NOTE: This function is called only when operating in a multiqueue
4144 * receive environment.
c9a11c23 4145 */
63c92c9d 4146static void igc_update_ring_itr(struct igc_q_vector *q_vector)
c9a11c23 4147{
63c92c9d
SN
4148 struct igc_adapter *adapter = q_vector->adapter;
4149 int new_val = q_vector->itr_val;
4150 int avg_wire_size = 0;
4151 unsigned int packets;
c9a11c23 4152
63c92c9d
SN
4153 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4154 * ints/sec - ITR timer value of 120 ticks.
4155 */
4156 switch (adapter->link_speed) {
4157 case SPEED_10:
4158 case SPEED_100:
4159 new_val = IGC_4K_ITR;
4160 goto set_itr_val;
4161 default:
4162 break;
4163 }
c9a11c23 4164
63c92c9d
SN
4165 packets = q_vector->rx.total_packets;
4166 if (packets)
4167 avg_wire_size = q_vector->rx.total_bytes / packets;
c9a11c23 4168
63c92c9d
SN
4169 packets = q_vector->tx.total_packets;
4170 if (packets)
4171 avg_wire_size = max_t(u32, avg_wire_size,
4172 q_vector->tx.total_bytes / packets);
c9a11c23 4173
63c92c9d
SN
4174 /* if avg_wire_size isn't set no work was done */
4175 if (!avg_wire_size)
4176 goto clear_counts;
c9a11c23 4177
63c92c9d
SN
4178 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4179 avg_wire_size += 24;
c9a11c23 4180
63c92c9d
SN
4181 /* Don't starve jumbo frames */
4182 avg_wire_size = min(avg_wire_size, 3000);
4183
4184 /* Give a little boost to mid-size frames */
4185 if (avg_wire_size > 300 && avg_wire_size < 1200)
4186 new_val = avg_wire_size / 3;
c9a11c23 4187 else
63c92c9d 4188 new_val = avg_wire_size / 2;
c9a11c23 4189
63c92c9d
SN
4190 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4191 if (new_val < IGC_20K_ITR &&
4192 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4193 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4194 new_val = IGC_20K_ITR;
c9a11c23 4195
63c92c9d
SN
4196set_itr_val:
4197 if (new_val != q_vector->itr_val) {
4198 q_vector->itr_val = new_val;
4199 q_vector->set_itr = 1;
4200 }
4201clear_counts:
4202 q_vector->rx.total_bytes = 0;
4203 q_vector->rx.total_packets = 0;
4204 q_vector->tx.total_bytes = 0;
4205 q_vector->tx.total_packets = 0;
c9a11c23
SN
4206}
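
/* Editor's illustrative sketch (not part of the driver): the packet-size to
 * ITR mapping used by igc_update_ring_itr() above.  For example, 64-byte
 * frames give avg_wire_size 88 after the 24-byte CRC/preamble/IPG adjustment
 * and land at 88 / 2 = 44, while 900-byte frames fall in the mid-size band
 * and take the gentler divide-by-three step (924 / 3 = 308).
 * example_ring_itr() is a hypothetical restatement, not driver code.
 */
#if 0
static int example_ring_itr(unsigned int avg_frame_bytes)
{
	int avg_wire_size = avg_frame_bytes + 24;	/* CRC, preamble, IPG */

	avg_wire_size = min(avg_wire_size, 3000);	/* don't starve jumbos */

	if (avg_wire_size > 300 && avg_wire_size < 1200)
		return avg_wire_size / 3;		/* boost mid-size frames */

	return avg_wire_size / 2;
}
#endif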
4207
63c92c9d 4208static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
c9a11c23 4209{
63c92c9d
SN
4210 struct igc_adapter *adapter = q_vector->adapter;
4211 struct igc_hw *hw = &adapter->hw;
c9a11c23 4212
63c92c9d
SN
4213 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4214 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4215 if (adapter->num_q_vectors == 1)
4216 igc_set_itr(q_vector);
4217 else
4218 igc_update_ring_itr(q_vector);
4219 }
c9a11c23 4220
63c92c9d
SN
4221 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4222 if (adapter->msix_entries)
4223 wr32(IGC_EIMS, q_vector->eims_value);
4224 else
4225 igc_irq_enable(adapter);
4226 }
c9a11c23
SN
4227}
4228
63c92c9d
SN
4229static void igc_add_ring(struct igc_ring *ring,
4230 struct igc_ring_container *head)
65cd3a72 4231{
63c92c9d
SN
4232 head->ring = ring;
4233 head->count++;
65cd3a72
SN
4234}
4235
63c92c9d
SN
4236/**
4237 * igc_cache_ring_register - Descriptor ring to register mapping
4238 * @adapter: board private structure to initialize
4239 *
4240 * Once we know the feature-set enabled for the device, we'll cache
4241 * the register offset the descriptor ring is assigned to.
4242 */
4243static void igc_cache_ring_register(struct igc_adapter *adapter)
65cd3a72 4244{
63c92c9d 4245 int i = 0, j = 0;
65cd3a72 4246
63c92c9d
SN
4247 switch (adapter->hw.mac.type) {
4248 case igc_i225:
63c92c9d
SN
4249 default:
4250 for (; i < adapter->num_rx_queues; i++)
4251 adapter->rx_ring[i]->reg_idx = i;
4252 for (; j < adapter->num_tx_queues; j++)
4253 adapter->tx_ring[j]->reg_idx = j;
4254 break;
65cd3a72 4255 }
65cd3a72
SN
4256}
4257
63c92c9d
SN
4258/**
4259 * igc_poll - NAPI Rx polling callback
4260 * @napi: napi polling structure
4261 * @budget: count of how many packets we should handle
4262 */
4263static int igc_poll(struct napi_struct *napi, int budget)
65cd3a72 4264{
63c92c9d
SN
4265 struct igc_q_vector *q_vector = container_of(napi,
4266 struct igc_q_vector,
4267 napi);
fc9df2a0 4268 struct igc_ring *rx_ring = q_vector->rx.ring;
63c92c9d
SN
4269 bool clean_complete = true;
4270 int work_done = 0;
65cd3a72 4271
63c92c9d
SN
4272 if (q_vector->tx.ring)
4273 clean_complete = igc_clean_tx_irq(q_vector, budget);
65cd3a72 4274
fc9df2a0
AG
4275 if (rx_ring) {
4276 int cleaned = rx_ring->xsk_pool ?
4277 igc_clean_rx_irq_zc(q_vector, budget) :
4278 igc_clean_rx_irq(q_vector, budget);
65cd3a72 4279
63c92c9d
SN
4280 work_done += cleaned;
4281 if (cleaned >= budget)
4282 clean_complete = false;
4283 }
4284
4285 /* If all work not completed, return budget and keep polling */
4286 if (!clean_complete)
4287 return budget;
4288
4289 /* Exit the polling mode, but don't re-enable interrupts if stack might
4290 * poll us due to busy-polling
65cd3a72 4291 */
63c92c9d
SN
4292 if (likely(napi_complete_done(napi, work_done)))
4293 igc_ring_irq_enable(q_vector);
65cd3a72 4294
63c92c9d 4295 return min(work_done, budget - 1);
65cd3a72
SN
4296}
4297
63c92c9d
SN
4298/**
4299 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4300 * @adapter: board private structure to initialize
4301 * @v_count: q_vectors allocated on adapter, used for ring interleaving
4302 * @v_idx: index of vector in adapter struct
4303 * @txr_count: total number of Tx rings to allocate
4304 * @txr_idx: index of first Tx ring to allocate
4305 * @rxr_count: total number of Rx rings to allocate
4306 * @rxr_idx: index of first Rx ring to allocate
4307 *
4308 * We allocate one q_vector. If allocation fails we return -ENOMEM.
6245c848 4309 */
63c92c9d
SN
4310static int igc_alloc_q_vector(struct igc_adapter *adapter,
4311 unsigned int v_count, unsigned int v_idx,
4312 unsigned int txr_count, unsigned int txr_idx,
4313 unsigned int rxr_count, unsigned int rxr_idx)
6245c848 4314{
63c92c9d
SN
4315 struct igc_q_vector *q_vector;
4316 struct igc_ring *ring;
4317 int ring_count;
6245c848 4318
63c92c9d
SN
4319 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4320 if (txr_count > 1 || rxr_count > 1)
4321 return -ENOMEM;
6245c848 4322
63c92c9d 4323 ring_count = txr_count + rxr_count;
6245c848 4324
63c92c9d
SN
4325 /* allocate q_vector and rings */
4326 q_vector = adapter->q_vector[v_idx];
4327 if (!q_vector)
4328 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
4329 GFP_KERNEL);
4330 else
4331 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4332 if (!q_vector)
4333 return -ENOMEM;
6245c848 4334
63c92c9d
SN
4335 /* initialize NAPI */
4336 netif_napi_add(adapter->netdev, &q_vector->napi,
4337 igc_poll, 64);
6245c848 4338
63c92c9d
SN
4339 /* tie q_vector and adapter together */
4340 adapter->q_vector[v_idx] = q_vector;
4341 q_vector->adapter = adapter;
6245c848 4342
63c92c9d
SN
4343 /* initialize work limits */
4344 q_vector->tx.work_limit = adapter->tx_work_limit;
6245c848 4345
63c92c9d
SN
4346 /* initialize ITR configuration */
4347 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4348 q_vector->itr_val = IGC_START_ITR;
6245c848 4349
63c92c9d
SN
4350 /* initialize pointer to rings */
4351 ring = q_vector->ring;
6245c848 4352
63c92c9d
SN
4353 /* initialize ITR */
4354 if (rxr_count) {
4355 /* rx or rx/tx vector */
4356 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4357 q_vector->itr_val = adapter->rx_itr_setting;
4358 } else {
4359 /* tx only vector */
4360 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4361 q_vector->itr_val = adapter->tx_itr_setting;
4362 }
6245c848 4363
63c92c9d
SN
4364 if (txr_count) {
4365 /* assign generic ring traits */
4366 ring->dev = &adapter->pdev->dev;
4367 ring->netdev = adapter->netdev;
6245c848 4368
63c92c9d
SN
4369 /* configure backlink on ring */
4370 ring->q_vector = q_vector;
4371
4372 /* update q_vector Tx values */
4373 igc_add_ring(ring, &q_vector->tx);
4374
4375 /* apply Tx specific ring traits */
4376 ring->count = adapter->tx_ring_count;
4377 ring->queue_index = txr_idx;
4378
4379 /* assign ring to adapter */
4380 adapter->tx_ring[txr_idx] = ring;
4381
4382 /* push pointer to next ring */
4383 ring++;
6245c848
SN
4384 }
4385
63c92c9d
SN
4386 if (rxr_count) {
4387 /* assign generic ring traits */
4388 ring->dev = &adapter->pdev->dev;
4389 ring->netdev = adapter->netdev;
6245c848 4390
63c92c9d
SN
4391 /* configure backlink on ring */
4392 ring->q_vector = q_vector;
4393
4394 /* update q_vector Rx values */
4395 igc_add_ring(ring, &q_vector->rx);
4396
4397 /* apply Rx specific ring traits */
4398 ring->count = adapter->rx_ring_count;
4399 ring->queue_index = rxr_idx;
4400
4401 /* assign ring to adapter */
4402 adapter->rx_ring[rxr_idx] = ring;
4403 }
4404
4405 return 0;
6245c848
SN
4406}
4407
3df25e4c 4408/**
63c92c9d
SN
4409 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4410 * @adapter: board private structure to initialize
4411 *
4412 * We allocate one q_vector per queue interrupt. If allocation fails we
4413 * return -ENOMEM.
3df25e4c 4414 */
63c92c9d 4415static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3df25e4c 4416{
63c92c9d
SN
4417 int rxr_remaining = adapter->num_rx_queues;
4418 int txr_remaining = adapter->num_tx_queues;
4419 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4420 int q_vectors = adapter->num_q_vectors;
4421 int err;
3df25e4c 4422
63c92c9d
SN
4423 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4424 for (; rxr_remaining; v_idx++) {
4425 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4426 0, 0, 1, rxr_idx);
3df25e4c 4427
63c92c9d
SN
4428 if (err)
4429 goto err_out;
3df25e4c 4430
63c92c9d
SN
4431 /* update counts and index */
4432 rxr_remaining--;
4433 rxr_idx++;
4434 }
3df25e4c
SN
4435 }
4436
63c92c9d
SN
4437 for (; v_idx < q_vectors; v_idx++) {
4438 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4439 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
55cd7386 4440
63c92c9d
SN
4441 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4442 tqpv, txr_idx, rqpv, rxr_idx);
55cd7386 4443
63c92c9d
SN
4444 if (err)
4445 goto err_out;
55cd7386 4446
63c92c9d
SN
4447 /* update counts and index */
4448 rxr_remaining -= rqpv;
4449 txr_remaining -= tqpv;
4450 rxr_idx++;
4451 txr_idx++;
4452 }
55cd7386 4453
63c92c9d 4454 return 0;
3df25e4c 4455
63c92c9d
SN
4456err_out:
4457 adapter->num_tx_queues = 0;
4458 adapter->num_rx_queues = 0;
4459 adapter->num_q_vectors = 0;
3df25e4c 4460
63c92c9d
SN
4461 while (v_idx--)
4462 igc_free_q_vector(adapter, v_idx);
3df25e4c 4463
63c92c9d 4464 return -ENOMEM;
3df25e4c
SN
4465}
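
/* Editor's illustrative sketch (not part of the driver): how the loop in
 * igc_alloc_q_vectors() spreads the remaining rings over the remaining
 * vectors.  DIV_ROUND_UP keeps the split even: with 4 Rx rings and 4 vectors
 * left each vector takes DIV_ROUND_UP(4, 4) = 1 ring, and the ratio stays
 * balanced as both counts shrink together.  example_rings_for_vector() is a
 * hypothetical restatement of that step.
 */
#if 0
static unsigned int example_rings_for_vector(unsigned int rings_remaining,
					     unsigned int vectors_remaining)
{
	/* e.g. DIV_ROUND_UP(4, 4) == 1, DIV_ROUND_UP(3, 2) == 2 */
	return DIV_ROUND_UP(rings_remaining, vectors_remaining);
}
#endif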
4466
4467/**
63c92c9d 4468 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
3df25e4c 4469 * @adapter: Pointer to adapter structure
63c92c9d 4470 * @msix: boolean for MSI-X capability
3df25e4c 4471 *
63c92c9d 4472 * This function initializes the interrupts and allocates all of the queues.
3df25e4c 4473 */
63c92c9d 4474static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3df25e4c 4475{
25f06eff 4476 struct net_device *dev = adapter->netdev;
63c92c9d 4477 int err = 0;
3df25e4c 4478
63c92c9d 4479 igc_set_interrupt_capability(adapter, msix);
3df25e4c 4480
63c92c9d
SN
4481 err = igc_alloc_q_vectors(adapter);
4482 if (err) {
25f06eff 4483 netdev_err(dev, "Unable to allocate memory for vectors\n");
63c92c9d
SN
4484 goto err_alloc_q_vectors;
4485 }
3df25e4c 4486
63c92c9d 4487 igc_cache_ring_register(adapter);
3df25e4c 4488
3df25e4c
SN
4489 return 0;
4490
63c92c9d
SN
4491err_alloc_q_vectors:
4492 igc_reset_interrupt_capability(adapter);
3df25e4c
SN
4493 return err;
4494}
4495
4496/**
63c92c9d 4497 * igc_sw_init - Initialize general software structures (struct igc_adapter)
3df25e4c 4498 * @adapter: board private structure to initialize
3df25e4c 4499 *
63c92c9d
SN
4500 * igc_sw_init initializes the Adapter private data structure.
4501 * Fields are initialized based on PCI device information and
4502 * OS network device settings (MTU size).
3df25e4c 4503 */
63c92c9d 4504static int igc_sw_init(struct igc_adapter *adapter)
3df25e4c 4505{
63c92c9d
SN
4506 struct net_device *netdev = adapter->netdev;
4507 struct pci_dev *pdev = adapter->pdev;
4508 struct igc_hw *hw = &adapter->hw;
3df25e4c 4509
63c92c9d 4510 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3df25e4c 4511
63c92c9d
SN
4512 /* set default ring sizes */
4513 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4514 adapter->rx_ring_count = IGC_DEFAULT_RXD;
3df25e4c 4515
63c92c9d
SN
4516 /* set default ITR values */
4517 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4518 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
3df25e4c 4519
63c92c9d
SN
4520 /* set default work limits */
4521 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
3df25e4c 4522
63c92c9d
SN
4523 /* adjust max frame to be at least the size of a standard frame */
4524 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4525 VLAN_HLEN;
4526 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
0411d368 4527
42fc5dc0 4528 mutex_init(&adapter->nfc_rule_lock);
d957c601
AG
4529 INIT_LIST_HEAD(&adapter->nfc_rule_list);
4530 adapter->nfc_rule_count = 0;
4531
63c92c9d
SN
4532 spin_lock_init(&adapter->stats64_lock);
4533 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
0411d368
SN
4534 adapter->flags |= IGC_FLAG_HAS_MSIX;
4535
63c92c9d 4536 igc_init_queue_configuration(adapter);
0411d368 4537
63c92c9d
SN
4538 /* This call may decrease the number of queues */
4539 if (igc_init_interrupt_scheme(adapter, true)) {
25f06eff 4540 netdev_err(netdev, "Unable to allocate memory for queues\n");
63c92c9d
SN
4541 return -ENOMEM;
4542 }
0411d368 4543
63c92c9d
SN
4544 /* Explicitly disable IRQ since the NIC can be in any state. */
4545 igc_irq_disable(adapter);
0411d368 4546
63c92c9d 4547 set_bit(__IGC_DOWN, &adapter->state);
0411d368 4548
63c92c9d
SN
4549 return 0;
4550}
0411d368 4551
63c92c9d
SN
4552/**
4553 * igc_up - Open the interface and prepare it to handle traffic
4554 * @adapter: board private structure
4555 */
4556void igc_up(struct igc_adapter *adapter)
4557{
4558 struct igc_hw *hw = &adapter->hw;
4559 int i = 0;
0411d368 4560
63c92c9d
SN
4561 /* hardware has been reset, we need to reload some things */
4562 igc_configure(adapter);
0411d368 4563
63c92c9d 4564 clear_bit(__IGC_DOWN, &adapter->state);
0411d368 4565
63c92c9d
SN
4566 for (i = 0; i < adapter->num_q_vectors; i++)
4567 napi_enable(&adapter->q_vector[i]->napi);
0411d368 4568
63c92c9d
SN
4569 if (adapter->msix_entries)
4570 igc_configure_msix(adapter);
4571 else
4572 igc_assign_vector(adapter->q_vector[0], 0);
0411d368 4573
63c92c9d
SN
4574 /* Clear any pending interrupts. */
4575 rd32(IGC_ICR);
4576 igc_irq_enable(adapter);
0411d368 4577
63c92c9d 4578 netif_tx_start_all_queues(adapter->netdev);
0411d368 4579
63c92c9d 4580 /* start the watchdog. */
501f2309 4581 hw->mac.get_link_status = true;
63c92c9d 4582 schedule_work(&adapter->watchdog_task);
0411d368
SN
4583}
4584
f7bcca5d 4585/**
63c92c9d
SN
4586 * igc_update_stats - Update the board statistics counters
4587 * @adapter: board private structure
f7bcca5d 4588 */
63c92c9d 4589void igc_update_stats(struct igc_adapter *adapter)
f7bcca5d 4590{
63c92c9d
SN
4591 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4592 struct pci_dev *pdev = adapter->pdev;
4593 struct igc_hw *hw = &adapter->hw;
4594 u64 _bytes, _packets;
4595 u64 bytes, packets;
4596 unsigned int start;
4597 u32 mpc;
4598 int i;
f7bcca5d 4599
63c92c9d
SN
4600 /* Prevent stats update while adapter is being reset, or if the pci
4601 * connection is down.
f7bcca5d 4602 */
63c92c9d
SN
4603 if (adapter->link_speed == 0)
4604 return;
4605 if (pci_channel_offline(pdev))
4606 return;
f7bcca5d 4607
63c92c9d
SN
4608 packets = 0;
4609 bytes = 0;
3df25e4c 4610
63c92c9d
SN
4611 rcu_read_lock();
4612 for (i = 0; i < adapter->num_rx_queues; i++) {
4613 struct igc_ring *ring = adapter->rx_ring[i];
4614 u32 rqdpc = rd32(IGC_RQDPC(i));
3df25e4c 4615
63c92c9d
SN
4616 if (hw->mac.type >= igc_i225)
4617 wr32(IGC_RQDPC(i), 0);
3df25e4c 4618
63c92c9d
SN
4619 if (rqdpc) {
4620 ring->rx_stats.drops += rqdpc;
4621 net_stats->rx_fifo_errors += rqdpc;
4622 }
a8c4873b 4623
63c92c9d
SN
4624 do {
4625 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
4626 _bytes = ring->rx_stats.bytes;
4627 _packets = ring->rx_stats.packets;
4628 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
4629 bytes += _bytes;
4630 packets += _packets;
4631 }
4632
4633 net_stats->rx_bytes = bytes;
4634 net_stats->rx_packets = packets;
4635
4636 packets = 0;
4637 bytes = 0;
4638 for (i = 0; i < adapter->num_tx_queues; i++) {
4639 struct igc_ring *ring = adapter->tx_ring[i];
4640
4641 do {
4642 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
4643 _bytes = ring->tx_stats.bytes;
4644 _packets = ring->tx_stats.packets;
4645 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
4646 bytes += _bytes;
4647 packets += _packets;
4648 }
4649 net_stats->tx_bytes = bytes;
4650 net_stats->tx_packets = packets;
4651 rcu_read_unlock();
4652
4653 /* read stats registers */
4654 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4655 adapter->stats.gprc += rd32(IGC_GPRC);
4656 adapter->stats.gorc += rd32(IGC_GORCL);
4657 rd32(IGC_GORCH); /* clear GORCL */
4658 adapter->stats.bprc += rd32(IGC_BPRC);
4659 adapter->stats.mprc += rd32(IGC_MPRC);
4660 adapter->stats.roc += rd32(IGC_ROC);
4661
4662 adapter->stats.prc64 += rd32(IGC_PRC64);
4663 adapter->stats.prc127 += rd32(IGC_PRC127);
4664 adapter->stats.prc255 += rd32(IGC_PRC255);
4665 adapter->stats.prc511 += rd32(IGC_PRC511);
4666 adapter->stats.prc1023 += rd32(IGC_PRC1023);
4667 adapter->stats.prc1522 += rd32(IGC_PRC1522);
40edc734
SN
4668 adapter->stats.tlpic += rd32(IGC_TLPIC);
4669 adapter->stats.rlpic += rd32(IGC_RLPIC);
e6529944 4670 adapter->stats.hgptc += rd32(IGC_HGPTC);
63c92c9d
SN
4671
4672 mpc = rd32(IGC_MPC);
4673 adapter->stats.mpc += mpc;
4674 net_stats->rx_fifo_errors += mpc;
4675 adapter->stats.scc += rd32(IGC_SCC);
4676 adapter->stats.ecol += rd32(IGC_ECOL);
4677 adapter->stats.mcc += rd32(IGC_MCC);
4678 adapter->stats.latecol += rd32(IGC_LATECOL);
4679 adapter->stats.dc += rd32(IGC_DC);
4680 adapter->stats.rlec += rd32(IGC_RLEC);
4681 adapter->stats.xonrxc += rd32(IGC_XONRXC);
4682 adapter->stats.xontxc += rd32(IGC_XONTXC);
4683 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4684 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4685 adapter->stats.fcruc += rd32(IGC_FCRUC);
4686 adapter->stats.gptc += rd32(IGC_GPTC);
4687 adapter->stats.gotc += rd32(IGC_GOTCL);
4688 rd32(IGC_GOTCH); /* clear GOTCL */
4689 adapter->stats.rnbc += rd32(IGC_RNBC);
4690 adapter->stats.ruc += rd32(IGC_RUC);
4691 adapter->stats.rfc += rd32(IGC_RFC);
4692 adapter->stats.rjc += rd32(IGC_RJC);
4693 adapter->stats.tor += rd32(IGC_TORH);
4694 adapter->stats.tot += rd32(IGC_TOTH);
4695 adapter->stats.tpr += rd32(IGC_TPR);
4696
4697 adapter->stats.ptc64 += rd32(IGC_PTC64);
4698 adapter->stats.ptc127 += rd32(IGC_PTC127);
4699 adapter->stats.ptc255 += rd32(IGC_PTC255);
4700 adapter->stats.ptc511 += rd32(IGC_PTC511);
4701 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4702 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4703
4704 adapter->stats.mptc += rd32(IGC_MPTC);
4705 adapter->stats.bptc += rd32(IGC_BPTC);
4706
4707 adapter->stats.tpt += rd32(IGC_TPT);
4708 adapter->stats.colc += rd32(IGC_COLC);
51c657b4 4709 adapter->stats.colc += rd32(IGC_RERC);
63c92c9d
SN
4710
4711 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
4712
4713 adapter->stats.tsctc += rd32(IGC_TSCTC);
63c92c9d
SN
4714
4715 adapter->stats.iac += rd32(IGC_IAC);
63c92c9d
SN
4716
4717 /* Fill out the OS statistics structure */
4718 net_stats->multicast = adapter->stats.mprc;
4719 net_stats->collisions = adapter->stats.colc;
4720
4721 /* Rx Errors */
4722
4723 /* RLEC on some newer hardware can be incorrect so build
4724 * our own version based on RUC and ROC
4725 */
4726 net_stats->rx_errors = adapter->stats.rxerrc +
4727 adapter->stats.crcerrs + adapter->stats.algnerrc +
4728 adapter->stats.ruc + adapter->stats.roc +
4729 adapter->stats.cexterr;
4730 net_stats->rx_length_errors = adapter->stats.ruc +
4731 adapter->stats.roc;
4732 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4733 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4734 net_stats->rx_missed_errors = adapter->stats.mpc;
4735
4736 /* Tx Errors */
4737 net_stats->tx_errors = adapter->stats.ecol +
4738 adapter->stats.latecol;
4739 net_stats->tx_aborted_errors = adapter->stats.ecol;
4740 net_stats->tx_window_errors = adapter->stats.latecol;
4741 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4742
4743 /* Tx Dropped needs to be maintained elsewhere */
4744
4745 /* Management Stats */
4746 adapter->stats.mgptc += rd32(IGC_MGTPTC);
4747 adapter->stats.mgprc += rd32(IGC_MGTPRC);
4748 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4749}
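
/* Editor's illustrative sketch (not part of the driver): the seqcount-style
 * reader loop igc_update_stats() uses above.  The 64-bit byte/packet counters
 * are updated by the datapath under u64_stats_update_begin/end, so a reader
 * snapshots them and retries if a writer raced with the read.  The ring and
 * field names mirror the driver; the helper itself is hypothetical.
 */
#if 0
static void example_read_rx_stats(struct igc_ring *ring,
				  u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
		*bytes = ring->rx_stats.bytes;
		*packets = ring->rx_stats.packets;
	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
}
#endif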
4750
208983f0 4751/**
63c92c9d
SN
4752 * igc_down - Close the interface
4753 * @adapter: board private structure
208983f0 4754 */
63c92c9d 4755void igc_down(struct igc_adapter *adapter)
208983f0 4756{
63c92c9d 4757 struct net_device *netdev = adapter->netdev;
208983f0 4758 struct igc_hw *hw = &adapter->hw;
63c92c9d
SN
4759 u32 tctl, rctl;
4760 int i = 0;
208983f0 4761
63c92c9d 4762 set_bit(__IGC_DOWN, &adapter->state);
208983f0 4763
b03c49cd
VCG
4764 igc_ptp_suspend(adapter);
4765
63c92c9d
SN
4766 /* disable receives in the hardware */
4767 rctl = rd32(IGC_RCTL);
4768 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
4769 /* flush and sleep below */
4770
63c92c9d
SN
4771 /* set trans_start so we don't get spurious watchdogs during reset */
4772 netif_trans_update(netdev);
4773
4774 netif_carrier_off(netdev);
4775 netif_tx_stop_all_queues(netdev);
4776
4777 /* disable transmits in the hardware */
4778 tctl = rd32(IGC_TCTL);
4779 tctl &= ~IGC_TCTL_EN;
4780 wr32(IGC_TCTL, tctl);
4781 /* flush both disables and wait for them to finish */
4782 wrfl();
4783 usleep_range(10000, 20000);
4784
4785 igc_irq_disable(adapter);
4786
4787 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4788
4789 for (i = 0; i < adapter->num_q_vectors; i++) {
4790 if (adapter->q_vector[i]) {
4791 napi_synchronize(&adapter->q_vector[i]->napi);
4792 napi_disable(&adapter->q_vector[i]->napi);
208983f0
SN
4793 }
4794 }
4795
63c92c9d
SN
4796 del_timer_sync(&adapter->watchdog_timer);
4797 del_timer_sync(&adapter->phy_info_timer);
4798
4799 /* record the stats before reset */
4800 spin_lock(&adapter->stats64_lock);
4801 igc_update_stats(adapter);
4802 spin_unlock(&adapter->stats64_lock);
4803
4804 adapter->link_speed = 0;
4805 adapter->link_duplex = 0;
4806
4807 if (!pci_channel_offline(adapter->pdev))
4808 igc_reset(adapter);
4809
4810 /* clear VLAN promisc flag so VFTA will be updated if necessary */
4811 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
4812
4813 igc_clean_all_tx_rings(adapter);
4814 igc_clean_all_rx_rings(adapter);
208983f0
SN
4815}
4816
63c92c9d 4817void igc_reinit_locked(struct igc_adapter *adapter)
0507ef8a 4818{
63c92c9d
SN
4819 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4820 usleep_range(1000, 2000);
4821 igc_down(adapter);
4822 igc_up(adapter);
4823 clear_bit(__IGC_RESETTING, &adapter->state);
208983f0
SN
4824}
4825
63c92c9d 4826static void igc_reset_task(struct work_struct *work)
208983f0 4827{
63c92c9d 4828 struct igc_adapter *adapter;
208983f0 4829
63c92c9d 4830 adapter = container_of(work, struct igc_adapter, reset_task);
208983f0 4831
6da26237
SN
4832 rtnl_lock();
4833 /* If we're already down or resetting, just bail */
4834 if (test_bit(__IGC_DOWN, &adapter->state) ||
4835 test_bit(__IGC_RESETTING, &adapter->state)) {
4836 rtnl_unlock();
4837 return;
4838 }
4839
9c384ee3
SN
4840 igc_rings_dump(adapter);
4841 igc_regs_dump(adapter);
63c92c9d
SN
4842 netdev_err(adapter->netdev, "Reset adapter\n");
4843 igc_reinit_locked(adapter);
6da26237 4844 rtnl_unlock();
63c92c9d 4845}
208983f0 4846
63c92c9d
SN
4847/**
4848 * igc_change_mtu - Change the Maximum Transfer Unit
4849 * @netdev: network interface device structure
4850 * @new_mtu: new value for maximum frame size
4851 *
4852 * Returns 0 on success, negative on failure
4853 */
4854static int igc_change_mtu(struct net_device *netdev, int new_mtu)
4855{
4856 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4857 struct igc_adapter *adapter = netdev_priv(netdev);
208983f0 4858
26575105
AG
4859 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
4860 netdev_dbg(netdev, "Jumbo frames not supported with XDP");
4861 return -EINVAL;
4862 }
4863
63c92c9d
SN
4864 /* adjust max frame to be at least the size of a standard frame */
4865 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4866 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
208983f0 4867
63c92c9d
SN
4868 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4869 usleep_range(1000, 2000);
208983f0 4870
63c92c9d
SN
4871 /* igc_down has a dependency on max_frame_size */
4872 adapter->max_frame_size = max_frame;
208983f0 4873
63c92c9d
SN
4874 if (netif_running(netdev))
4875 igc_down(adapter);
208983f0 4876
25f06eff 4877 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
63c92c9d 4878 netdev->mtu = new_mtu;
208983f0 4879
63c92c9d
SN
4880 if (netif_running(netdev))
4881 igc_up(adapter);
4882 else
4883 igc_reset(adapter);
208983f0 4884
63c92c9d 4885 clear_bit(__IGC_RESETTING, &adapter->state);
208983f0 4886
63c92c9d
SN
4887 return 0;
4888}
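
/* Editor's illustrative sketch (not part of the driver): the frame-size
 * arithmetic in igc_change_mtu() above.  A standard 1500-byte MTU becomes
 * 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes of
 * max_frame_size, and anything smaller than a minimal Ethernet frame is
 * rounded up to ETH_FRAME_LEN + ETH_FCS_LEN (1518).  example_max_frame()
 * is a hypothetical restatement of that calculation.
 */
#if 0
static int example_max_frame(int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frame < ETH_FRAME_LEN + ETH_FCS_LEN)
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	return max_frame;	/* example_max_frame(1500) == 1522 */
}
#endif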
208983f0 4889
63c92c9d 4890/**
6b7ed22a 4891 * igc_get_stats64 - Get System Network Statistics
63c92c9d 4892 * @netdev: network interface device structure
6b7ed22a 4893 * @stats: rtnl_link_stats64 pointer
63c92c9d
SN
4894 *
4895 * Returns the address of the device statistics structure.
4896 * The statistics are updated here and also from the timer callback.
4897 */
6b7ed22a
VCG
4898static void igc_get_stats64(struct net_device *netdev,
4899 struct rtnl_link_stats64 *stats)
63c92c9d
SN
4900{
4901 struct igc_adapter *adapter = netdev_priv(netdev);
208983f0 4902
6b7ed22a 4903 spin_lock(&adapter->stats64_lock);
63c92c9d
SN
4904 if (!test_bit(__IGC_RESETTING, &adapter->state))
4905 igc_update_stats(adapter);
6b7ed22a
VCG
4906 memcpy(stats, &adapter->stats64, sizeof(*stats));
4907 spin_unlock(&adapter->stats64_lock);
63c92c9d
SN
4908}
4909
4910static netdev_features_t igc_fix_features(struct net_device *netdev,
4911 netdev_features_t features)
4912{
4913 /* Since there is no support for separate Rx/Tx vlan accel
4914 * enable/disable make sure Tx flag is always in same state as Rx.
4915 */
4916 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4917 features |= NETIF_F_HW_VLAN_CTAG_TX;
4918 else
4919 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4920
4921 return features;
4922}
4923
4924static int igc_set_features(struct net_device *netdev,
4925 netdev_features_t features)
4926{
4927 netdev_features_t changed = netdev->features ^ features;
4928 struct igc_adapter *adapter = netdev_priv(netdev);
208983f0 4929
8d744963
MHZ
4930 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
4931 igc_vlan_mode(netdev, features);
4932
63c92c9d
SN
4933 /* Add VLAN support */
4934 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
4935 return 0;
208983f0 4936
e256ec83
AG
4937 if (!(features & NETIF_F_NTUPLE))
4938 igc_flush_nfc_rules(adapter);
208983f0 4939
63c92c9d 4940 netdev->features = features;
208983f0 4941
63c92c9d
SN
4942 if (netif_running(netdev))
4943 igc_reinit_locked(adapter);
4944 else
4945 igc_reset(adapter);
208983f0 4946
63c92c9d 4947 return 1;
0507ef8a
SN
4948}
4949
63c92c9d
SN
4950static netdev_features_t
4951igc_features_check(struct sk_buff *skb, struct net_device *dev,
4952 netdev_features_t features)
3df25e4c 4953{
63c92c9d 4954 unsigned int network_hdr_len, mac_hdr_len;
3df25e4c 4955
63c92c9d
SN
4956 /* Make certain the headers can be described by a context descriptor */
4957 mac_hdr_len = skb_network_header(skb) - skb->data;
4958 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
4959 return features & ~(NETIF_F_HW_CSUM |
4960 NETIF_F_SCTP_CRC |
4961 NETIF_F_HW_VLAN_CTAG_TX |
4962 NETIF_F_TSO |
4963 NETIF_F_TSO6);
3df25e4c 4964
63c92c9d
SN
4965 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4966 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
4967 return features & ~(NETIF_F_HW_CSUM |
4968 NETIF_F_SCTP_CRC |
4969 NETIF_F_TSO |
4970 NETIF_F_TSO6);
3df25e4c 4971
63c92c9d
SN
4972 /* We can only support IPv4 TSO in tunnels if we can mangle the
4973 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4974 */
4975 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4976 features &= ~NETIF_F_TSO;
3df25e4c 4977
63c92c9d
SN
4978 return features;
4979}
3df25e4c 4980
2c344ae2
VCG
4981static void igc_tsync_interrupt(struct igc_adapter *adapter)
4982{
87938851 4983 u32 ack, tsauxc, sec, nsec, tsicr;
2c344ae2 4984 struct igc_hw *hw = &adapter->hw;
64433e5b 4985 struct ptp_clock_event event;
87938851
ES
4986 struct timespec64 ts;
4987
4988 tsicr = rd32(IGC_TSICR);
4989 ack = 0;
2c344ae2 4990
64433e5b
ES
4991 if (tsicr & IGC_TSICR_SYS_WRAP) {
4992 event.type = PTP_CLOCK_PPS;
4993 if (adapter->ptp_caps.pps)
4994 ptp_clock_event(adapter->ptp_clock, &event);
4995 ack |= IGC_TSICR_SYS_WRAP;
4996 }
4997
2c344ae2
VCG
4998 if (tsicr & IGC_TSICR_TXTS) {
4999 /* retrieve hardware timestamp */
5000 schedule_work(&adapter->ptp_tx_work);
5001 ack |= IGC_TSICR_TXTS;
5002 }
5003
87938851
ES
5004 if (tsicr & IGC_TSICR_TT0) {
5005 spin_lock(&adapter->tmreg_lock);
5006 ts = timespec64_add(adapter->perout[0].start,
5007 adapter->perout[0].period);
5008 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5009 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5010 tsauxc = rd32(IGC_TSAUXC);
5011 tsauxc |= IGC_TSAUXC_EN_TT0;
5012 wr32(IGC_TSAUXC, tsauxc);
5013 adapter->perout[0].start = ts;
5014 spin_unlock(&adapter->tmreg_lock);
5015 ack |= IGC_TSICR_TT0;
5016 }
5017
5018 if (tsicr & IGC_TSICR_TT1) {
5019 spin_lock(&adapter->tmreg_lock);
5020 ts = timespec64_add(adapter->perout[1].start,
5021 adapter->perout[1].period);
5022 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5023 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5024 tsauxc = rd32(IGC_TSAUXC);
5025 tsauxc |= IGC_TSAUXC_EN_TT1;
5026 wr32(IGC_TSAUXC, tsauxc);
5027 adapter->perout[1].start = ts;
5028 spin_unlock(&adapter->tmreg_lock);
5029 ack |= IGC_TSICR_TT1;
5030 }
5031
5032 if (tsicr & IGC_TSICR_AUTT0) {
5033 nsec = rd32(IGC_AUXSTMPL0);
5034 sec = rd32(IGC_AUXSTMPH0);
5035 event.type = PTP_CLOCK_EXTTS;
5036 event.index = 0;
5037 event.timestamp = sec * NSEC_PER_SEC + nsec;
5038 ptp_clock_event(adapter->ptp_clock, &event);
5039 ack |= IGC_TSICR_AUTT0;
5040 }
5041
5042 if (tsicr & IGC_TSICR_AUTT1) {
5043 nsec = rd32(IGC_AUXSTMPL1);
5044 sec = rd32(IGC_AUXSTMPH1);
5045 event.type = PTP_CLOCK_EXTTS;
5046 event.index = 1;
5047 event.timestamp = sec * NSEC_PER_SEC + nsec;
5048 ptp_clock_event(adapter->ptp_clock, &event);
5049 ack |= IGC_TSICR_AUTT1;
5050 }
5051
2c344ae2
VCG
5052 /* acknowledge the interrupts */
5053 wr32(IGC_TSICR, ack);
5054}
5055
13b5b7fd 5056/**
63c92c9d 5057 * igc_msix_other - msix other interrupt handler
13b5b7fd 5058 * @irq: interrupt number
63c92c9d 5059 * @data: pointer to a q_vector
13b5b7fd 5060 */
63c92c9d 5061static irqreturn_t igc_msix_other(int irq, void *data)
13b5b7fd
SN
5062{
5063 struct igc_adapter *adapter = data;
13b5b7fd 5064 struct igc_hw *hw = &adapter->hw;
13b5b7fd
SN
5065 u32 icr = rd32(IGC_ICR);
5066
63c92c9d 5067 /* reading ICR causes bit 31 of EICR to be cleared */
13b5b7fd
SN
5068 if (icr & IGC_ICR_DRSTA)
5069 schedule_work(&adapter->reset_task);
5070
5071 if (icr & IGC_ICR_DOUTSYNC) {
5072 /* HW is reporting DMA is out of sync */
5073 adapter->stats.doosync++;
5074 }
5075
63c92c9d 5076 if (icr & IGC_ICR_LSC) {
501f2309 5077 hw->mac.get_link_status = true;
63c92c9d 5078 /* guard against interrupt when we're going down */
13b5b7fd
SN
5079 if (!test_bit(__IGC_DOWN, &adapter->state))
5080 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5081 }
5082
2c344ae2
VCG
5083 if (icr & IGC_ICR_TS)
5084 igc_tsync_interrupt(adapter);
5085
63c92c9d 5086 wr32(IGC_EIMS, adapter->eims_other);
13b5b7fd
SN
5087
5088 return IRQ_HANDLED;
5089}
5090
63c92c9d 5091static void igc_write_itr(struct igc_q_vector *q_vector)
13b5b7fd 5092{
63c92c9d 5093 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
13b5b7fd 5094
63c92c9d
SN
5095 if (!q_vector->set_itr)
5096 return;
13b5b7fd 5097
63c92c9d
SN
5098 if (!itr_val)
5099 itr_val = IGC_ITR_VAL_MASK;
13b5b7fd 5100
63c92c9d 5101 itr_val |= IGC_EITR_CNT_IGNR;
13b5b7fd 5102
63c92c9d
SN
5103 writel(itr_val, q_vector->itr_register);
5104 q_vector->set_itr = 0;
5105}
13b5b7fd 5106
63c92c9d
SN
5107static irqreturn_t igc_msix_ring(int irq, void *data)
5108{
5109 struct igc_q_vector *q_vector = data;
5110
5111 /* Write the ITR value calculated from the previous interrupt. */
5112 igc_write_itr(q_vector);
13b5b7fd
SN
5113
5114 napi_schedule(&q_vector->napi);
5115
5116 return IRQ_HANDLED;
5117}
5118
63c92c9d
SN
5119/**
5120 * igc_request_msix - Initialize MSI-X interrupts
5121 * @adapter: Pointer to adapter structure
5122 *
5123 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5124 * kernel.
5125 */
5126static int igc_request_msix(struct igc_adapter *adapter)
3df25e4c 5127{
373e2829 5128 unsigned int num_q_vectors = adapter->num_q_vectors;
63c92c9d
SN
5129 int i = 0, err = 0, vector = 0, free_vector = 0;
5130 struct net_device *netdev = adapter->netdev;
3df25e4c 5131
63c92c9d
SN
5132 err = request_irq(adapter->msix_entries[vector].vector,
5133 &igc_msix_other, 0, netdev->name, adapter);
5134 if (err)
5135 goto err_out;
3df25e4c 5136
373e2829
SN
5137 if (num_q_vectors > MAX_Q_VECTORS) {
5138 num_q_vectors = MAX_Q_VECTORS;
5139 dev_warn(&adapter->pdev->dev,
5140 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5141 adapter->num_q_vectors, MAX_Q_VECTORS);
5142 }
5143 for (i = 0; i < num_q_vectors; i++) {
63c92c9d 5144 struct igc_q_vector *q_vector = adapter->q_vector[i];
3df25e4c 5145
63c92c9d 5146 vector++;
3df25e4c 5147
63c92c9d 5148 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
3df25e4c 5149
63c92c9d
SN
5150 if (q_vector->rx.ring && q_vector->tx.ring)
5151 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5152 q_vector->rx.ring->queue_index);
5153 else if (q_vector->tx.ring)
5154 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5155 q_vector->tx.ring->queue_index);
5156 else if (q_vector->rx.ring)
5157 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5158 q_vector->rx.ring->queue_index);
5159 else
5160 sprintf(q_vector->name, "%s-unused", netdev->name);
3df25e4c 5161
63c92c9d
SN
5162 err = request_irq(adapter->msix_entries[vector].vector,
5163 igc_msix_ring, 0, q_vector->name,
5164 q_vector);
5165 if (err)
5166 goto err_free;
3df25e4c 5167 }
3df25e4c 5168
63c92c9d
SN
5169 igc_configure_msix(adapter);
5170 return 0;
3df25e4c 5171
63c92c9d
SN
5172err_free:
5173 /* free already assigned IRQs */
5174 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
3df25e4c 5175
63c92c9d
SN
5176 vector--;
5177 for (i = 0; i < vector; i++) {
5178 free_irq(adapter->msix_entries[free_vector++].vector,
5179 adapter->q_vector[i]);
3df25e4c 5180 }
63c92c9d
SN
5181err_out:
5182 return err;
3df25e4c
SN
5183}
5184
5185/**
63c92c9d
SN
5186 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5187 * @adapter: Pointer to adapter structure
5188 *
5189 * This function resets the device so that it has 0 rx queues, tx queues, and
5190 * MSI-X interrupts allocated.
3df25e4c 5191 */
63c92c9d 5192static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
3df25e4c 5193{
63c92c9d
SN
5194 igc_free_q_vectors(adapter);
5195 igc_reset_interrupt_capability(adapter);
5196}
3df25e4c 5197
63c92c9d
SN
5198/* Need to wait a few seconds after link up to get diagnostic information from
5199 * the phy
5200 */
5201static void igc_update_phy_info(struct timer_list *t)
5202{
5203 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
3df25e4c 5204
63c92c9d 5205 igc_get_phy_info(&adapter->hw);
3df25e4c
SN
5206}
5207
63c92c9d
SN
5208/**
5209 * igc_has_link - check shared code for link and determine up/down
5210 * @adapter: pointer to driver private info
5211 */
5212bool igc_has_link(struct igc_adapter *adapter)
3df25e4c 5213{
63c92c9d
SN
5214 struct igc_hw *hw = &adapter->hw;
5215 bool link_active = false;
5216
5217 /* get_link_status is set on LSC (link status) interrupt or
5218 * rx sequence error interrupt. get_link_status will stay
5219 * false until the igc_check_for_link establishes link
5220 * for copper adapters ONLY
5221 */
47bca7de
SN
5222 if (!hw->mac.get_link_status)
5223 return true;
5224 hw->mac.ops.check_for_link(hw);
5225 link_active = !hw->mac.get_link_status;
63c92c9d 5226
7c496de5 5227 if (hw->mac.type == igc_i225) {
63c92c9d
SN
5228 if (!netif_carrier_ok(adapter->netdev)) {
5229 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5230 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5231 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5232 adapter->link_check_timeout = jiffies;
5233 }
5234 }
5235
5236 return link_active;
3df25e4c
SN
5237}
5238
5239/**
63c92c9d
SN
5240 * igc_watchdog - Timer Call-back
5241 * @t: timer for the watchdog
3df25e4c 5242 */
63c92c9d 5243static void igc_watchdog(struct timer_list *t)
3df25e4c 5244{
63c92c9d
SN
5245 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5246 /* Do the rest outside of interrupt context */
5247 schedule_work(&adapter->watchdog_task);
5248}
3df25e4c 5249
63c92c9d
SN
5250static void igc_watchdog_task(struct work_struct *work)
5251{
5252 struct igc_adapter *adapter = container_of(work,
5253 struct igc_adapter,
5254 watchdog_task);
5255 struct net_device *netdev = adapter->netdev;
5256 struct igc_hw *hw = &adapter->hw;
5257 struct igc_phy_info *phy = &hw->phy;
5258 u16 phy_data, retry_count = 20;
63c92c9d
SN
5259 u32 link;
5260 int i;
3df25e4c 5261
63c92c9d 5262 link = igc_has_link(adapter);
3df25e4c 5263
63c92c9d
SN
5264 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5265 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5266 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5267 else
5268 link = false;
5269 }
3df25e4c 5270
63c92c9d 5271 if (link) {
8594a7f3
SN
5272 /* Cancel scheduled suspend requests. */
5273 pm_runtime_resume(netdev->dev.parent);
5274
63c92c9d
SN
5275 if (!netif_carrier_ok(netdev)) {
5276 u32 ctrl;
3df25e4c 5277
63c92c9d
SN
5278 hw->mac.ops.get_speed_and_duplex(hw,
5279 &adapter->link_speed,
5280 &adapter->link_duplex);
3df25e4c 5281
63c92c9d
SN
5282 ctrl = rd32(IGC_CTRL);
5283 /* Link status message must follow this format */
5284 netdev_info(netdev,
25f06eff 5285 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
63c92c9d
SN
5286 adapter->link_speed,
5287 adapter->link_duplex == FULL_DUPLEX ?
5288 "Full" : "Half",
5289 (ctrl & IGC_CTRL_TFCE) &&
5290 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5291 (ctrl & IGC_CTRL_RFCE) ? "RX" :
5292 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
3df25e4c 5293
93ec439a
SN
5294 /* disable EEE if enabled */
5295 if ((adapter->flags & IGC_FLAG_EEE) &&
5296 adapter->link_duplex == HALF_DUPLEX) {
5297 netdev_info(netdev,
5298 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5299 adapter->hw.dev_spec._base.eee_enable = false;
5300 adapter->flags &= ~IGC_FLAG_EEE;
5301 }
5302
63c92c9d
SN
5303 /* check if SmartSpeed worked */
5304 igc_check_downshift(hw);
5305 if (phy->speed_downgraded)
5306 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
3df25e4c 5307
63c92c9d
SN
5308 /* adjust timeout factor according to speed/duplex */
5309 adapter->tx_timeout_factor = 1;
5310 switch (adapter->link_speed) {
5311 case SPEED_10:
5312 adapter->tx_timeout_factor = 14;
5313 break;
5314 case SPEED_100:
b27b8dc7
MHZ
5315 case SPEED_1000:
5316 case SPEED_2500:
5317 adapter->tx_timeout_factor = 7;
63c92c9d
SN
5318 break;
5319 }
3df25e4c 5320
63c92c9d
SN
5321 if (adapter->link_speed != SPEED_1000)
5322 goto no_wait;
3df25e4c 5323
63c92c9d
SN
5324 /* wait for Remote receiver status OK */
5325retry_read_status:
5326 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5327 &phy_data)) {
5328 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5329 retry_count) {
5330 msleep(100);
5331 retry_count--;
5332 goto retry_read_status;
5333 } else if (!retry_count) {
25f06eff 5334 netdev_err(netdev, "exceeded max 2 seconds\n");
63c92c9d
SN
5335 }
5336 } else {
25f06eff 5337 netdev_err(netdev, "read 1000Base-T Status Reg\n");
63c92c9d
SN
5338 }
5339no_wait:
5340 netif_carrier_on(netdev);
3df25e4c 5341
63c92c9d
SN
5342 /* link state has changed, schedule phy info update */
5343 if (!test_bit(__IGC_DOWN, &adapter->state))
5344 mod_timer(&adapter->phy_info_timer,
5345 round_jiffies(jiffies + 2 * HZ));
5346 }
5347 } else {
5348 if (netif_carrier_ok(netdev)) {
5349 adapter->link_speed = 0;
5350 adapter->link_duplex = 0;
3df25e4c 5351
63c92c9d 5352 /* Link status message must follow this format */
25f06eff 5353 netdev_info(netdev, "NIC Link is Down\n");
63c92c9d 5354 netif_carrier_off(netdev);
3df25e4c 5355
63c92c9d
SN
5356 /* link state has changed, schedule phy info update */
5357 if (!test_bit(__IGC_DOWN, &adapter->state))
5358 mod_timer(&adapter->phy_info_timer,
5359 round_jiffies(jiffies + 2 * HZ));
3df25e4c 5360
63c92c9d
SN
5361 /* link is down, time to check for alternate media */
5362 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
5363 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5364 schedule_work(&adapter->reset_task);
5365 /* return immediately */
5366 return;
5367 }
5368 }
8594a7f3
SN
5369 pm_schedule_suspend(netdev->dev.parent,
5370 MSEC_PER_SEC * 5);
3df25e4c 5371
63c92c9d
SN
5372 /* also check for alternate media here */
5373 } else if (!netif_carrier_ok(netdev) &&
5374 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
5375 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5376 schedule_work(&adapter->reset_task);
5377 /* return immediately */
5378 return;
5379 }
5380 }
3df25e4c
SN
5381 }
5382
63c92c9d
SN
5383 spin_lock(&adapter->stats64_lock);
5384 igc_update_stats(adapter);
5385 spin_unlock(&adapter->stats64_lock);
3df25e4c 5386
63c92c9d
SN
5387 for (i = 0; i < adapter->num_tx_queues; i++) {
5388 struct igc_ring *tx_ring = adapter->tx_ring[i];
5389
5390 if (!netif_carrier_ok(netdev)) {
5391 /* We've lost link, so the controller stops DMA,
5392 * but we've got queued Tx work that's never going
5393 * to get done, so reset controller to flush Tx.
5394 * (Do the reset outside of interrupt context).
5395 */
5396 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5397 adapter->tx_timeout_count++;
5398 schedule_work(&adapter->reset_task);
5399 /* return immediately since reset is imminent */
5400 return;
5401 }
5402 }
3df25e4c 5403
63c92c9d
SN
5404 /* Force detection of hung controller every watchdog period */
5405 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5406 }
3df25e4c 5407
63c92c9d
SN
5408 /* Cause software interrupt to ensure Rx ring is cleaned */
5409 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5410 u32 eics = 0;
3df25e4c 5411
63c92c9d
SN
5412 for (i = 0; i < adapter->num_q_vectors; i++)
5413 eics |= adapter->q_vector[i]->eims_value;
5414 wr32(IGC_EICS, eics);
5415 } else {
5416 wr32(IGC_ICS, IGC_ICS_RXDMT0);
3df25e4c
SN
5417 }
5418
2c344ae2
VCG
5419 igc_ptp_tx_hang(adapter);
5420
63c92c9d
SN
5421 /* Reset the timer */
5422 if (!test_bit(__IGC_DOWN, &adapter->state)) {
5423 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5424 mod_timer(&adapter->watchdog_timer,
5425 round_jiffies(jiffies + HZ));
5426 else
5427 mod_timer(&adapter->watchdog_timer,
5428 round_jiffies(jiffies + 2 * HZ));
5429 }
3df25e4c
SN
5430}
5431
5432/**
63c92c9d
SN
5433 * igc_intr_msi - Interrupt Handler
5434 * @irq: interrupt number
5435 * @data: pointer to a network interface device structure
3df25e4c 5436 */
63c92c9d 5437static irqreturn_t igc_intr_msi(int irq, void *data)
3df25e4c 5438{
63c92c9d
SN
5439 struct igc_adapter *adapter = data;
5440 struct igc_q_vector *q_vector = adapter->q_vector[0];
5441 struct igc_hw *hw = &adapter->hw;
5442 /* read ICR disables interrupts using IAM */
5443 u32 icr = rd32(IGC_ICR);
3df25e4c 5444
63c92c9d 5445 igc_write_itr(q_vector);
3df25e4c 5446
63c92c9d
SN
5447 if (icr & IGC_ICR_DRSTA)
5448 schedule_work(&adapter->reset_task);
3df25e4c 5449
63c92c9d
SN
5450 if (icr & IGC_ICR_DOUTSYNC) {
5451 /* HW is reporting DMA is out of sync */
5452 adapter->stats.doosync++;
3df25e4c
SN
5453 }
5454
63c92c9d 5455 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
501f2309 5456 hw->mac.get_link_status = true;
63c92c9d
SN
5457 if (!test_bit(__IGC_DOWN, &adapter->state))
5458 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3df25e4c
SN
5459 }
5460
63c92c9d 5461 napi_schedule(&q_vector->napi);
3df25e4c 5462
63c92c9d 5463 return IRQ_HANDLED;
3df25e4c
SN
5464}
5465
13b5b7fd 5466/**
63c92c9d
SN
5467 * igc_intr - Legacy Interrupt Handler
5468 * @irq: interrupt number
5469 * @data: pointer to a network interface device structure
13b5b7fd 5470 */
63c92c9d 5471static irqreturn_t igc_intr(int irq, void *data)
13b5b7fd 5472{
63c92c9d
SN
5473 struct igc_adapter *adapter = data;
5474 struct igc_q_vector *q_vector = adapter->q_vector[0];
5475 struct igc_hw *hw = &adapter->hw;
5476 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5477 * need for the IMC write
5478 */
5479 u32 icr = rd32(IGC_ICR);
13b5b7fd 5480
63c92c9d
SN
5481 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5482 * not set, then the adapter didn't send an interrupt
5483 */
5484 if (!(icr & IGC_ICR_INT_ASSERTED))
5485 return IRQ_NONE;
13b5b7fd 5486
63c92c9d 5487 igc_write_itr(q_vector);
3df25e4c 5488
63c92c9d
SN
5489 if (icr & IGC_ICR_DRSTA)
5490 schedule_work(&adapter->reset_task);
3df25e4c 5491
63c92c9d
SN
5492 if (icr & IGC_ICR_DOUTSYNC) {
5493 /* HW is reporting DMA is out of sync */
5494 adapter->stats.doosync++;
3df25e4c
SN
5495 }
5496
63c92c9d 5497 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
501f2309 5498 hw->mac.get_link_status = true;
63c92c9d
SN
5499 /* guard against interrupt when we're going down */
5500 if (!test_bit(__IGC_DOWN, &adapter->state))
5501 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5502 }
13b5b7fd 5503
63c92c9d 5504 napi_schedule(&q_vector->napi);
3df25e4c 5505
63c92c9d 5506 return IRQ_HANDLED;
3df25e4c
SN
5507}
5508
5509static void igc_free_irq(struct igc_adapter *adapter)
5510{
5511 if (adapter->msix_entries) {
5512 int vector = 0, i;
5513
5514 free_irq(adapter->msix_entries[vector++].vector, adapter);
5515
5516 for (i = 0; i < adapter->num_q_vectors; i++)
5517 free_irq(adapter->msix_entries[vector++].vector,
5518 adapter->q_vector[i]);
5519 } else {
5520 free_irq(adapter->pdev->irq, adapter);
5521 }
5522}
5523
3df25e4c
SN
5524/**
5525 * igc_request_irq - initialize interrupts
5526 * @adapter: Pointer to adapter structure
5527 *
5528 * Attempts MSI-X first, then falls back to MSI and finally to a
5529 * legacy (shared) interrupt, depending on hardware and kernel support.
5530 */
5531static int igc_request_irq(struct igc_adapter *adapter)
5532{
13b5b7fd
SN
5533 struct net_device *netdev = adapter->netdev;
5534 struct pci_dev *pdev = adapter->pdev;
3df25e4c
SN
5535 int err = 0;
5536
5537 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5538 err = igc_request_msix(adapter);
5539 if (!err)
5540 goto request_done;
5541 /* fall back to MSI */
13b5b7fd
SN
5542 igc_free_all_tx_resources(adapter);
5543 igc_free_all_rx_resources(adapter);
3df25e4c
SN
5544
5545 igc_clear_interrupt_scheme(adapter);
5546 err = igc_init_interrupt_scheme(adapter, false);
5547 if (err)
5548 goto request_done;
13b5b7fd
SN
5549 igc_setup_all_tx_resources(adapter);
5550 igc_setup_all_rx_resources(adapter);
3df25e4c
SN
5551 igc_configure(adapter);
5552 }
5553
13b5b7fd
SN
5554 igc_assign_vector(adapter->q_vector[0], 0);
5555
5556 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5557 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5558 netdev->name, adapter);
5559 if (!err)
5560 goto request_done;
5561
5562 /* fall back to legacy interrupts */
5563 igc_reset_interrupt_capability(adapter);
5564 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5565 }
5566
5567 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5568 netdev->name, adapter);
5569
5570 if (err)
25f06eff 5571 netdev_err(netdev, "Error %d getting interrupt\n", err);
13b5b7fd 5572
3df25e4c
SN
5573request_done:
5574 return err;
5575}
5576
c9a11c23 5577/**
86efeccd 5578 * __igc_open - Called when a network interface is made active
c9a11c23 5579 * @netdev: network interface device structure
86efeccd 5580 * @resuming: boolean indicating if the device is resuming
c9a11c23
SN
5581 *
5582 * Returns 0 on success, negative value on failure
5583 *
5584 * The open entry point is called when a network interface is made
5585 * active by the system (IFF_UP). At this point all resources needed
5586 * for transmit and receive operations are allocated, the interrupt
5587 * handler is registered with the OS, the watchdog timer is started,
5588 * and the stack is notified that the interface is ready.
5589 */
5590static int __igc_open(struct net_device *netdev, bool resuming)
5591{
5592 struct igc_adapter *adapter = netdev_priv(netdev);
8594a7f3 5593 struct pci_dev *pdev = adapter->pdev;
c9a11c23 5594 struct igc_hw *hw = &adapter->hw;
3df25e4c 5595 int err = 0;
c9a11c23
SN
5596 int i = 0;
5597
5598 /* disallow open during test */
5599
5600 if (test_bit(__IGC_TESTING, &adapter->state)) {
5601 WARN_ON(resuming);
5602 return -EBUSY;
5603 }
5604
8594a7f3
SN
5605 if (!resuming)
5606 pm_runtime_get_sync(&pdev->dev);
5607
c9a11c23
SN
5608 netif_carrier_off(netdev);
5609
13b5b7fd
SN
5610 /* allocate transmit descriptors */
5611 err = igc_setup_all_tx_resources(adapter);
5612 if (err)
5613 goto err_setup_tx;
5614
5615 /* allocate receive descriptors */
5616 err = igc_setup_all_rx_resources(adapter);
5617 if (err)
5618 goto err_setup_rx;
5619
c9a11c23
SN
5620 igc_power_up_link(adapter);
5621
5622 igc_configure(adapter);
5623
3df25e4c
SN
5624 err = igc_request_irq(adapter);
5625 if (err)
5626 goto err_req_irq;
5627
5628 /* Notify the stack of the actual queue counts. */
14b21cec 5629 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3df25e4c
SN
5630 if (err)
5631 goto err_set_queues;
5632
5633 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5634 if (err)
5635 goto err_set_queues;
5636
c9a11c23
SN
5637 clear_bit(__IGC_DOWN, &adapter->state);
5638
5639 for (i = 0; i < adapter->num_q_vectors; i++)
5640 napi_enable(&adapter->q_vector[i]->napi);
5641
3df25e4c
SN
5642 /* Clear any pending interrupts. */
5643 rd32(IGC_ICR);
5644 igc_irq_enable(adapter);
5645
8594a7f3
SN
5646 if (!resuming)
5647 pm_runtime_put(&pdev->dev);
5648
13b5b7fd
SN
5649 netif_tx_start_all_queues(netdev);
5650
c9a11c23 5651 /* start the watchdog. */
501f2309 5652 hw->mac.get_link_status = true;
208983f0 5653 schedule_work(&adapter->watchdog_task);
c9a11c23
SN
5654
5655 return IGC_SUCCESS;
3df25e4c
SN
5656
5657err_set_queues:
5658 igc_free_irq(adapter);
5659err_req_irq:
5660 igc_release_hw_control(adapter);
a0beb3c1 5661 igc_power_down_phy_copper_base(&adapter->hw);
13b5b7fd
SN
5662 igc_free_all_rx_resources(adapter);
5663err_setup_rx:
5664 igc_free_all_tx_resources(adapter);
5665err_setup_tx:
5666 igc_reset(adapter);
8594a7f3
SN
5667 if (!resuming)
5668 pm_runtime_put(&pdev->dev);
3df25e4c
SN
5669
5670 return err;
c9a11c23
SN
5671}
5672
f026d8ca 5673int igc_open(struct net_device *netdev)
c9a11c23
SN
5674{
5675 return __igc_open(netdev, false);
5676}
5677
5678/**
86efeccd 5679 * __igc_close - Disables a network interface
c9a11c23 5680 * @netdev: network interface device structure
86efeccd 5681 * @suspending: boolean indicating the device is suspending
c9a11c23
SN
5682 *
5683 * Returns 0; this is not allowed to fail
5684 *
5685 * The close entry point is called when an interface is de-activated
5686 * by the OS. The hardware is still under the driver's control, but
5687 * needs to be disabled. A global MAC reset is issued to stop the
5688 * hardware, and all transmit and receive resources are freed.
5689 */
5690static int __igc_close(struct net_device *netdev, bool suspending)
5691{
5692 struct igc_adapter *adapter = netdev_priv(netdev);
8594a7f3 5693 struct pci_dev *pdev = adapter->pdev;
c9a11c23
SN
5694
5695 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5696
8594a7f3
SN
5697 if (!suspending)
5698 pm_runtime_get_sync(&pdev->dev);
5699
c9a11c23
SN
5700 igc_down(adapter);
5701
5702 igc_release_hw_control(adapter);
5703
3df25e4c
SN
5704 igc_free_irq(adapter);
5705
13b5b7fd
SN
5706 igc_free_all_tx_resources(adapter);
5707 igc_free_all_rx_resources(adapter);
5708
8594a7f3
SN
5709 if (!suspending)
5710 pm_runtime_put_sync(&pdev->dev);
5711
c9a11c23
SN
5712 return 0;
5713}
5714
f026d8ca 5715int igc_close(struct net_device *netdev)
c9a11c23
SN
5716{
5717 if (netif_device_present(netdev) || netdev->dismantle)
5718 return __igc_close(netdev, false);
5719 return 0;
5720}
5721
5f295805
VCG
5722/**
5723 * igc_ioctl - Access the hwtstamp interface
5724 * @netdev: network interface device structure
b50f7bca 5725 * @ifr: interface request data
5f295805
VCG
5726 * @cmd: ioctl command
5727 **/
5728static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5729{
5730 switch (cmd) {
5731 case SIOCGHWTSTAMP:
5732 return igc_ptp_get_ts_config(netdev, ifr);
5733 case SIOCSHWTSTAMP:
5734 return igc_ptp_set_ts_config(netdev, ifr);
5735 default:
5736 return -EOPNOTSUPP;
5737 }
5738}
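
/* Usage sketch (illustrative only, not part of the driver): user space reaches
 * the handler above through the SIOCSHWTSTAMP / SIOCGHWTSTAMP ioctls on any
 * socket, with ifr_data pointing at a struct hwtstamp_config from
 * <linux/net_tstamp.h>. The interface name "eth0" is a placeholder, and a
 * user-space build would also need <sys/ioctl.h>, <sys/socket.h>, <net/if.h>,
 * <string.h> and <linux/sockios.h>:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */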
5739
82faa9b7
VCG
5740static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
5741 bool enable)
5742{
5743 struct igc_ring *ring;
5744 int i;
5745
5746 if (queue < 0 || queue >= adapter->num_tx_queues)
5747 return -EINVAL;
5748
5749 ring = adapter->tx_ring[queue];
5750 ring->launchtime_enable = enable;
5751
5752 if (adapter->base_time)
5753 return 0;
5754
5755 adapter->cycle_time = NSEC_PER_SEC;
5756
5757 for (i = 0; i < adapter->num_tx_queues; i++) {
5758 ring = adapter->tx_ring[i];
5759 ring->start_time = 0;
5760 ring->end_time = NSEC_PER_SEC;
5761 }
5762
5763 return 0;
5764}
5765
58c4ee0e
VCG
5766static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
5767{
5768 struct timespec64 b;
5769
5770 b = ktime_to_timespec64(base_time);
5771
5772 return timespec64_compare(now, &b) > 0;
5773}
5774
5775static bool validate_schedule(struct igc_adapter *adapter,
5776 const struct tc_taprio_qopt_offload *qopt)
ec50a9d4
VCG
5777{
5778 int queue_uses[IGC_MAX_TX_QUEUES] = { };
58c4ee0e 5779 struct timespec64 now;
ec50a9d4
VCG
5780 size_t n;
5781
5782 if (qopt->cycle_time_extension)
5783 return false;
5784
58c4ee0e
VCG
5785 igc_ptp_read(adapter, &now);
5786
5787 /* If we program the controller's BASET registers with a time
5788 * in the future, it will hold all the packets until that
5789 * time, causing a lot of TX Hangs, so to avoid that, we
5790 * reject schedules that would start in the future.
5791 */
5792 if (!is_base_time_past(qopt->base_time, &now))
5793 return false;
5794
ec50a9d4
VCG
5795 for (n = 0; n < qopt->num_entries; n++) {
5796 const struct tc_taprio_sched_entry *e;
5797 int i;
5798
5799 e = &qopt->entries[n];
5800
5801 /* i225 only supports "global" frame preemption
5802 * settings.
5803 */
5804 if (e->command != TC_TAPRIO_CMD_SET_GATES)
5805 return false;
5806
5807 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5808 if (e->gate_mask & BIT(i))
5809 queue_uses[i]++;
5810
5811 if (queue_uses[i] > 1)
5812 return false;
5813 }
5814 }
5815
5816 return true;
5817}
5818
82faa9b7
VCG
5819static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
5820 struct tc_etf_qopt_offload *qopt)
5821{
5822 struct igc_hw *hw = &adapter->hw;
5823 int err;
5824
5825 if (hw->mac.type != igc_i225)
5826 return -EOPNOTSUPP;
5827
5828 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
5829 if (err)
5830 return err;
5831
5832 return igc_tsn_offload_apply(adapter);
5833}
5834
ec50a9d4
VCG
5835static int igc_save_qbv_schedule(struct igc_adapter *adapter,
5836 struct tc_taprio_qopt_offload *qopt)
5837{
5838 u32 start_time = 0, end_time = 0;
5839 size_t n;
5840
5841 if (!qopt->enable) {
5842 adapter->base_time = 0;
5843 return 0;
5844 }
5845
5846 if (adapter->base_time)
5847 return -EALREADY;
5848
58c4ee0e 5849 if (!validate_schedule(adapter, qopt))
ec50a9d4
VCG
5850 return -EINVAL;
5851
5852 adapter->cycle_time = qopt->cycle_time;
5853 adapter->base_time = qopt->base_time;
5854
5855 /* FIXME: be a little smarter about cases when the gate for a
5856 * queue stays open for more than one entry.
5857 */
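 /* Worked example (illustrative): with two entries of 100 us each, entry 0
  * gating only queue 0 and entry 1 gating only queue 1, the loop below
  * leaves ring 0 with start_time = 0 and end_time = 100000 ns, and ring 1
  * with start_time = 100000 ns and end_time = 200000 ns.
  */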
5858 for (n = 0; n < qopt->num_entries; n++) {
5859 struct tc_taprio_sched_entry *e = &qopt->entries[n];
5860 int i;
5861
5862 end_time += e->interval;
5863
5864 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5865 struct igc_ring *ring = adapter->tx_ring[i];
5866
5867 if (!(e->gate_mask & BIT(i)))
5868 continue;
5869
5870 ring->start_time = start_time;
5871 ring->end_time = end_time;
5872 }
5873
5874 start_time += e->interval;
5875 }
5876
5877 return 0;
5878}
5879
5880static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
5881 struct tc_taprio_qopt_offload *qopt)
5882{
5883 struct igc_hw *hw = &adapter->hw;
5884 int err;
5885
5886 if (hw->mac.type != igc_i225)
5887 return -EOPNOTSUPP;
5888
5889 err = igc_save_qbv_schedule(adapter, qopt);
5890 if (err)
5891 return err;
5892
5893 return igc_tsn_offload_apply(adapter);
5894}
5895
5896static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
5897 void *type_data)
5898{
5899 struct igc_adapter *adapter = netdev_priv(dev);
5900
5901 switch (type) {
5902 case TC_SETUP_QDISC_TAPRIO:
5903 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
5904
82faa9b7
VCG
5905 case TC_SETUP_QDISC_ETF:
5906 return igc_tsn_enable_launchtime(adapter, type_data);
5907
ec50a9d4
VCG
5908 default:
5909 return -EOPNOTSUPP;
5910 }
5911}
5912
26575105
AG
5913static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
5914{
5915 struct igc_adapter *adapter = netdev_priv(dev);
5916
5917 switch (bpf->command) {
5918 case XDP_SETUP_PROG:
5919 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
fc9df2a0
AG
5920 case XDP_SETUP_XSK_POOL:
5921 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
5922 bpf->xsk.queue_id);
26575105
AG
5923 default:
5924 return -EOPNOTSUPP;
5925 }
5926}
5927
4ff32036
AG
5928static int igc_xdp_xmit(struct net_device *dev, int num_frames,
5929 struct xdp_frame **frames, u32 flags)
5930{
5931 struct igc_adapter *adapter = netdev_priv(dev);
5932 int cpu = smp_processor_id();
5933 struct netdev_queue *nq;
5934 struct igc_ring *ring;
5935 int i, drops;
5936
5937 if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
5938 return -ENETDOWN;
5939
5940 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
5941 return -EINVAL;
5942
5943 ring = igc_xdp_get_tx_ring(adapter, cpu);
5944 nq = txring_txq(ring);
5945
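 /* The ring picked by igc_xdp_get_tx_ring() is one of the regular TX rings,
  * which the stack's transmit path may be using concurrently, so take the
  * netdev queue lock before touching it.
  */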
5946 __netif_tx_lock(nq, cpu);
5947
5948 drops = 0;
5949 for (i = 0; i < num_frames; i++) {
5950 int err;
5951 struct xdp_frame *xdpf = frames[i];
5952
5953 err = igc_xdp_init_tx_descriptor(ring, xdpf);
5954 if (err) {
5955 xdp_return_frame_rx_napi(xdpf);
5956 drops++;
5957 }
5958 }
5959
5960 if (flags & XDP_XMIT_FLUSH)
5961 igc_flush_tx_descriptors(ring);
5962
5963 __netif_tx_unlock(nq);
5964
5965 return num_frames - drops;
5966}
5967
fc9df2a0
AG
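/* Writing the vector's EIMS bit into EICS raises a software-triggered
 * interrupt for that queue pair, so its NAPI poll runs even when no new
 * hardware event is pending; igc_xsk_wakeup() below uses this to kick the
 * AF_XDP rings.
 */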
5968static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
5969 struct igc_q_vector *q_vector)
5970{
5971 struct igc_hw *hw = &adapter->hw;
5972 u32 eics = 0;
5973
5974 eics |= q_vector->eims_value;
5975 wr32(IGC_EICS, eics);
5976}
5977
5978int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
5979{
5980 struct igc_adapter *adapter = netdev_priv(dev);
5981 struct igc_q_vector *q_vector;
5982 struct igc_ring *ring;
5983
5984 if (test_bit(__IGC_DOWN, &adapter->state))
5985 return -ENETDOWN;
5986
5987 if (!igc_xdp_is_enabled(adapter))
5988 return -ENXIO;
5989
5990 if (queue_id >= adapter->num_rx_queues)
5991 return -EINVAL;
5992
5993 ring = adapter->rx_ring[queue_id];
5994
5995 if (!ring->xsk_pool)
5996 return -ENXIO;
5997
5998 q_vector = adapter->q_vector[queue_id];
5999 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6000 igc_trigger_rxtxq_interrupt(adapter, q_vector);
6001
6002 return 0;
6003}
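
/* Usage sketch (illustrative only, not part of the driver): with an AF_XDP
 * socket bound to one of this adapter's queues in need-wakeup mode, user
 * space typically lands in the hook above by issuing a dummy send whenever
 * the ring's need-wakeup flag is set, e.g. with the libbpf/libxdp helper:
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * Here "tx" and "xsk_fd" are placeholders for the application's own producer
 * ring and socket descriptor.
 */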
6004
c9a11c23
SN
6005static const struct net_device_ops igc_netdev_ops = {
6006 .ndo_open = igc_open,
6007 .ndo_stop = igc_close,
6008 .ndo_start_xmit = igc_xmit_frame,
7f839684 6009 .ndo_set_rx_mode = igc_set_rx_mode,
c9a11c23
SN
6010 .ndo_set_mac_address = igc_set_mac,
6011 .ndo_change_mtu = igc_change_mtu,
6b7ed22a 6012 .ndo_get_stats64 = igc_get_stats64,
65cd3a72
SN
6013 .ndo_fix_features = igc_fix_features,
6014 .ndo_set_features = igc_set_features,
6015 .ndo_features_check = igc_features_check,
a7605370 6016 .ndo_eth_ioctl = igc_ioctl,
ec50a9d4 6017 .ndo_setup_tc = igc_setup_tc,
26575105 6018 .ndo_bpf = igc_bpf,
4ff32036 6019 .ndo_xdp_xmit = igc_xdp_xmit,
fc9df2a0 6020 .ndo_xsk_wakeup = igc_xsk_wakeup,
c9a11c23 6021};
146740f9
SN
6022
6023/* PCIe configuration access */
6024void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6025{
6026 struct igc_adapter *adapter = hw->back;
6027
6028 pci_read_config_word(adapter->pdev, reg, value);
6029}
6030
6031void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6032{
6033 struct igc_adapter *adapter = hw->back;
6034
6035 pci_write_config_word(adapter->pdev, reg, *value);
6036}
6037
6038s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6039{
6040 struct igc_adapter *adapter = hw->back;
146740f9 6041
a16f6d3a 6042 if (!pci_is_pcie(adapter->pdev))
146740f9
SN
6043 return -IGC_ERR_CONFIG;
6044
a16f6d3a 6045 pcie_capability_read_word(adapter->pdev, reg, value);
146740f9
SN
6046
6047 return IGC_SUCCESS;
6048}
6049
6050s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6051{
6052 struct igc_adapter *adapter = hw->back;
146740f9 6053
a16f6d3a 6054 if (!pci_is_pcie(adapter->pdev))
146740f9
SN
6055 return -IGC_ERR_CONFIG;
6056
a16f6d3a 6057 pcie_capability_write_word(adapter->pdev, reg, *value);
146740f9
SN
6058
6059 return IGC_SUCCESS;
6060}
6061
6062u32 igc_rd32(struct igc_hw *hw, u32 reg)
6063{
c9a11c23 6064 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
146740f9
SN
6065 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6066 u32 value = 0;
6067
146740f9
SN
6068 value = readl(&hw_addr[reg]);
6069
6070 /* reads should not return all F's */
c9a11c23
SN
6071 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6072 struct net_device *netdev = igc->netdev;
6073
146740f9 6074 hw->hw_addr = NULL;
c9a11c23
SN
6075 netif_device_detach(netdev);
6076 netdev_err(netdev, "PCIe link lost, device now detached\n");
94bc1e52
LP
6077 WARN(pci_device_is_present(igc->pdev),
6078 "igc: Failed to read reg 0x%x!\n", reg);
c9a11c23 6079 }
146740f9
SN
6080
6081 return value;
6082}
6083
8c5ad0da
SN
6084int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
6085{
8c5ad0da
SN
6086 struct igc_mac_info *mac = &adapter->hw.mac;
6087
501f2309 6088 mac->autoneg = false;
8c5ad0da
SN
6089
6090 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6091 * for the switch() below to work
6092 */
6093 if ((spd & 1) || (dplx & ~1))
6094 goto err_inval;
6095
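 /* SPEED_10/100/1000/2500 are all even and DUPLEX_HALF/FULL are 0/1, so
  * spd + dplx is unique per supported combination; e.g. 100 + DUPLEX_FULL
  * gives 101, selecting the 100 Mbps full-duplex case below.
  */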
6096 switch (spd + dplx) {
6097 case SPEED_10 + DUPLEX_HALF:
6098 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6099 break;
6100 case SPEED_10 + DUPLEX_FULL:
6101 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6102 break;
6103 case SPEED_100 + DUPLEX_HALF:
6104 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6105 break;
6106 case SPEED_100 + DUPLEX_FULL:
6107 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6108 break;
6109 case SPEED_1000 + DUPLEX_FULL:
501f2309 6110 mac->autoneg = true;
8c5ad0da
SN
6111 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6112 break;
6113 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6114 goto err_inval;
6115 case SPEED_2500 + DUPLEX_FULL:
501f2309 6116 mac->autoneg = true;
8c5ad0da
SN
6117 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
6118 break;
6119 case SPEED_2500 + DUPLEX_HALF: /* not supported */
6120 default:
6121 goto err_inval;
6122 }
6123
6124 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6125 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6126
6127 return 0;
6128
6129err_inval:
25f06eff 6130 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
8c5ad0da
SN
6131 return -EINVAL;
6132}
6133
d89f8841
SN
6134/**
6135 * igc_probe - Device Initialization Routine
6136 * @pdev: PCI device information struct
6137 * @ent: entry in igc_pci_tbl
6138 *
6139 * Returns 0 on success, negative on failure
6140 *
6141 * igc_probe initializes an adapter identified by a pci_dev structure.
6142 * The OS initialization, configuration of the adapter private structure,
6143 * and a hardware reset occur.
6144 */
6145static int igc_probe(struct pci_dev *pdev,
6146 const struct pci_device_id *ent)
6147{
146740f9 6148 struct igc_adapter *adapter;
c9a11c23
SN
6149 struct net_device *netdev;
6150 struct igc_hw *hw;
ab405612 6151 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
21da01fd 6152 int err, pci_using_dac;
d89f8841
SN
6153
6154 err = pci_enable_device_mem(pdev);
6155 if (err)
6156 return err;
6157
21da01fd
SN
6158 pci_using_dac = 0;
6159 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
d89f8841 6160 if (!err) {
21da01fd 6161 pci_using_dac = 1;
d89f8841 6162 } else {
21da01fd 6163 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
d89f8841 6164 if (err) {
21da01fd
SN
6165 dev_err(&pdev->dev,
6166 "No usable DMA configuration, aborting\n");
6167 goto err_dma;
d89f8841
SN
6168 }
6169 }
6170
21da01fd 6171 err = pci_request_mem_regions(pdev, igc_driver_name);
d89f8841
SN
6172 if (err)
6173 goto err_pci_reg;
6174
c9a11c23
SN
6175 pci_enable_pcie_error_reporting(pdev);
6176
d89f8841 6177 pci_set_master(pdev);
c9a11c23
SN
6178
6179 err = -ENOMEM;
6180 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6181 IGC_MAX_TX_QUEUES);
6182
6183 if (!netdev)
6184 goto err_alloc_etherdev;
6185
6186 SET_NETDEV_DEV(netdev, &pdev->dev);
6187
6188 pci_set_drvdata(pdev, netdev);
6189 adapter = netdev_priv(netdev);
6190 adapter->netdev = netdev;
6191 adapter->pdev = pdev;
6192 hw = &adapter->hw;
6193 hw->back = adapter;
6194 adapter->port_num = hw->bus.func;
8c5ad0da 6195 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
c9a11c23 6196
d89f8841 6197 err = pci_save_state(pdev);
c9a11c23
SN
6198 if (err)
6199 goto err_ioremap;
6200
6201 err = -EIO;
6202 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6203 pci_resource_len(pdev, 0));
6204 if (!adapter->io_addr)
6205 goto err_ioremap;
6206
6207 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6208 hw->hw_addr = adapter->io_addr;
6209
6210 netdev->netdev_ops = &igc_netdev_ops;
7df76bd1 6211 igc_ethtool_set_ops(netdev);
c9a11c23
SN
6212 netdev->watchdog_timeo = 5 * HZ;
6213
6214 netdev->mem_start = pci_resource_start(pdev, 0);
6215 netdev->mem_end = pci_resource_end(pdev, 0);
6216
6217 /* PCI config space info */
6218 hw->vendor_id = pdev->vendor;
6219 hw->device_id = pdev->device;
6220 hw->revision_id = pdev->revision;
6221 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6222 hw->subsystem_device_id = pdev->subsystem_device;
146740f9 6223
ab405612
SN
6224 /* Copy the default MAC and PHY function pointers */
6225 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5586838f 6226 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
ab405612
SN
6227
6228 /* Initialize skew-specific constants */
6229 err = ei->get_invariants(hw);
6230 if (err)
6231 goto err_sw_init;
6232
d3ae3cfb 6233 /* Add supported features to the features list*/
b7b46245 6234 netdev->features |= NETIF_F_SG;
f38b782d
SN
6235 netdev->features |= NETIF_F_TSO;
6236 netdev->features |= NETIF_F_TSO6;
8e8204a4 6237 netdev->features |= NETIF_F_TSO_ECN;
3bdd7086 6238 netdev->features |= NETIF_F_RXCSUM;
d3ae3cfb 6239 netdev->features |= NETIF_F_HW_CSUM;
0ac960a8 6240 netdev->features |= NETIF_F_SCTP_CRC;
635071e2 6241 netdev->features |= NETIF_F_HW_TC;
d3ae3cfb 6242
34428dff
SN
6243#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6244 NETIF_F_GSO_GRE_CSUM | \
6245 NETIF_F_GSO_IPXIP4 | \
6246 NETIF_F_GSO_IPXIP6 | \
6247 NETIF_F_GSO_UDP_TUNNEL | \
6248 NETIF_F_GSO_UDP_TUNNEL_CSUM)
6249
6250 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6251 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
d3ae3cfb 6252
146740f9
SN
6253 /* setup the private structure */
6254 err = igc_sw_init(adapter);
6255 if (err)
6256 goto err_sw_init;
6257
65cd3a72
SN
6258 /* copy netdev features into list of user selectable features */
6259 netdev->hw_features |= NETIF_F_NTUPLE;
8d744963
MHZ
6260 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6261 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
d3ae3cfb 6262 netdev->hw_features |= netdev->features;
65cd3a72 6263
4439dc42
SN
6264 if (pci_using_dac)
6265 netdev->features |= NETIF_F_HIGHDMA;
6266
8d744963
MHZ
6267 netdev->vlan_features |= netdev->features;
6268
c9a11c23
SN
6269 /* MTU range: 68 - 9216 */
6270 netdev->min_mtu = ETH_MIN_MTU;
6271 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6272
4eb80801
SN
6273 /* before reading the NVM, reset the controller to put the device in a
6274 * known good starting state
6275 */
6276 hw->mac.ops.reset_hw(hw);
6277
9b924edd
SN
6278 if (igc_get_flash_presence_i225(hw)) {
6279 if (hw->nvm.ops.validate(hw) < 0) {
25f06eff 6280 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
9b924edd
SN
6281 err = -EIO;
6282 goto err_eeprom;
6283 }
6284 }
6285
4eb80801
SN
6286 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6287 /* copy the MAC address out of the NVM */
6288 if (hw->mac.ops.read_mac_addr(hw))
6289 dev_err(&pdev->dev, "NVM Read Error\n");
6290 }
6291
6292 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
6293
6294 if (!is_valid_ether_addr(netdev->dev_addr)) {
6295 dev_err(&pdev->dev, "Invalid MAC Address\n");
6296 err = -EIO;
6297 goto err_eeprom;
6298 }
6299
0507ef8a
SN
6300 /* configure RXPBSIZE and TXPBSIZE */
6301 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
6302 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
6303
6304 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
208983f0 6305 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
0507ef8a
SN
6306
6307 INIT_WORK(&adapter->reset_task, igc_reset_task);
208983f0 6308 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
0507ef8a 6309
4eb80801
SN
6310 /* Initialize link properties that are user-changeable */
6311 adapter->fc_autoneg = true;
6312 hw->mac.autoneg = true;
6313 hw->phy.autoneg_advertised = 0xaf;
6314
6315 hw->fc.requested_mode = igc_fc_default;
6316 hw->fc.current_mode = igc_fc_default;
6317
e055600d
SN
6318 /* By default, support wake on port A */
6319 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6320
6321 /* initialize the wol settings based on the eeprom settings */
6322 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6323 adapter->wol |= IGC_WUFC_MAG;
6324
6325 device_set_wakeup_enable(&adapter->pdev->dev,
6326 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6327
3cda505a
VCG
6328 igc_ptp_init(adapter);
6329
c9a11c23
SN
6330 /* reset the hardware with the new settings */
6331 igc_reset(adapter);
6332
6333 /* let the f/w know that the h/w is now under the control of the
6334 * driver.
6335 */
6336 igc_get_hw_control(adapter);
6337
6338 strncpy(netdev->name, "eth%d", IFNAMSIZ);
6339 err = register_netdev(netdev);
6340 if (err)
6341 goto err_register;
6342
6343 /* carrier off reporting is important to ethtool even BEFORE open */
6344 netif_carrier_off(netdev);
6345
ab405612
SN
6346 /* keep a copy of the board-specific info */
6347 adapter->ei = *ei;
6348
c9a11c23
SN
6349 /* print pcie link status and MAC address */
6350 pcie_print_link_status(pdev);
6351 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6352
e0751556 6353 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
93ec439a
SN
6354 /* Disable EEE for internal PHY devices */
6355 hw->dev_spec._base.eee_enable = false;
6356 adapter->flags &= ~IGC_FLAG_EEE;
6357 igc_set_eee_i225(hw, false, false, false);
8594a7f3
SN
6358
6359 pm_runtime_put_noidle(&pdev->dev);
6360
d89f8841
SN
6361 return 0;
6362
c9a11c23
SN
6363err_register:
6364 igc_release_hw_control(adapter);
4eb80801
SN
6365err_eeprom:
6366 if (!igc_check_reset_block(hw))
6367 igc_reset_phy(hw);
146740f9 6368err_sw_init:
3df25e4c
SN
6369 igc_clear_interrupt_scheme(adapter);
6370 iounmap(adapter->io_addr);
c9a11c23
SN
6371err_ioremap:
6372 free_netdev(netdev);
6373err_alloc_etherdev:
c6bc9e5c 6374 pci_disable_pcie_error_reporting(pdev);
faf4dd52 6375 pci_release_mem_regions(pdev);
d89f8841
SN
6376err_pci_reg:
6377err_dma:
6378 pci_disable_device(pdev);
6379 return err;
6380}
6381
6382/**
6383 * igc_remove - Device Removal Routine
6384 * @pdev: PCI device information struct
6385 *
6386 * igc_remove is called by the PCI subsystem to alert the driver
6387 * that it should release a PCI device. This could be caused by a
6388 * Hot-Plug event, or because the driver is going to be removed from
6389 * memory.
6390 */
6391static void igc_remove(struct pci_dev *pdev)
6392{
c9a11c23
SN
6393 struct net_device *netdev = pci_get_drvdata(pdev);
6394 struct igc_adapter *adapter = netdev_priv(netdev);
6395
8594a7f3
SN
6396 pm_runtime_get_noresume(&pdev->dev);
6397
e256ec83
AG
6398 igc_flush_nfc_rules(adapter);
6399
5f295805
VCG
6400 igc_ptp_stop(adapter);
6401
c9a11c23 6402 set_bit(__IGC_DOWN, &adapter->state);
0507ef8a
SN
6403
6404 del_timer_sync(&adapter->watchdog_timer);
208983f0 6405 del_timer_sync(&adapter->phy_info_timer);
0507ef8a
SN
6406
6407 cancel_work_sync(&adapter->reset_task);
208983f0 6408 cancel_work_sync(&adapter->watchdog_task);
c9a11c23
SN
6409
6410 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6411 * would have already happened in close and is redundant.
6412 */
6413 igc_release_hw_control(adapter);
6414 unregister_netdev(netdev);
6415
0507ef8a
SN
6416 igc_clear_interrupt_scheme(adapter);
6417 pci_iounmap(pdev, adapter->io_addr);
6418 pci_release_mem_regions(pdev);
d89f8841 6419
c9a11c23 6420 free_netdev(netdev);
0507ef8a
SN
6421
6422 pci_disable_pcie_error_reporting(pdev);
6423
d89f8841
SN
6424 pci_disable_device(pdev);
6425}
6426
9513d2a5
SN
6427static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
6428 bool runtime)
6429{
6430 struct net_device *netdev = pci_get_drvdata(pdev);
6431 struct igc_adapter *adapter = netdev_priv(netdev);
6432 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
6433 struct igc_hw *hw = &adapter->hw;
6434 u32 ctrl, rctl, status;
6435 bool wake;
6436
6437 rtnl_lock();
6438 netif_device_detach(netdev);
6439
6440 if (netif_running(netdev))
6441 __igc_close(netdev, true);
6442
a5136f76
SN
6443 igc_ptp_suspend(adapter);
6444
9513d2a5
SN
6445 igc_clear_interrupt_scheme(adapter);
6446 rtnl_unlock();
6447
6448 status = rd32(IGC_STATUS);
6449 if (status & IGC_STATUS_LU)
6450 wufc &= ~IGC_WUFC_LNKC;
6451
6452 if (wufc) {
6453 igc_setup_rctl(adapter);
6454 igc_set_rx_mode(netdev);
6455
6456 /* turn on all-multi mode if wake on multicast is enabled */
6457 if (wufc & IGC_WUFC_MC) {
6458 rctl = rd32(IGC_RCTL);
6459 rctl |= IGC_RCTL_MPE;
6460 wr32(IGC_RCTL, rctl);
6461 }
6462
6463 ctrl = rd32(IGC_CTRL);
6464 ctrl |= IGC_CTRL_ADVD3WUC;
6465 wr32(IGC_CTRL, ctrl);
6466
6467 /* Allow time for pending master requests to run */
6468 igc_disable_pcie_master(hw);
6469
6470 wr32(IGC_WUC, IGC_WUC_PME_EN);
6471 wr32(IGC_WUFC, wufc);
6472 } else {
6473 wr32(IGC_WUC, 0);
6474 wr32(IGC_WUFC, 0);
6475 }
6476
6477 wake = wufc || adapter->en_mng_pt;
6478 if (!wake)
a0beb3c1 6479 igc_power_down_phy_copper_base(&adapter->hw);
9513d2a5
SN
6480 else
6481 igc_power_up_link(adapter);
6482
6483 if (enable_wake)
6484 *enable_wake = wake;
6485
6486 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6487 * would have already happened in close and is redundant.
6488 */
6489 igc_release_hw_control(adapter);
6490
6491 pci_disable_device(pdev);
6492
6493 return 0;
6494}
6495
6496#ifdef CONFIG_PM
6497static int __maybe_unused igc_runtime_suspend(struct device *dev)
6498{
6499 return __igc_shutdown(to_pci_dev(dev), NULL, 1);
6500}
6501
6502static void igc_deliver_wake_packet(struct net_device *netdev)
6503{
6504 struct igc_adapter *adapter = netdev_priv(netdev);
6505 struct igc_hw *hw = &adapter->hw;
6506 struct sk_buff *skb;
6507 u32 wupl;
6508
6509 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
6510
6511 /* WUPM stores only the first 128 bytes of the wake packet.
6512 * Read the packet only if we have the whole thing.
6513 */
6514 if (wupl == 0 || wupl > IGC_WUPM_BYTES)
6515 return;
6516
6517 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
6518 if (!skb)
6519 return;
6520
6521 skb_put(skb, wupl);
6522
6523 /* Ensure reads are 32-bit aligned */
6524 wupl = roundup(wupl, 4);
6525
6526 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
6527
6528 skb->protocol = eth_type_trans(skb, netdev);
6529 netif_rx(skb);
6530}
6531
6532static int __maybe_unused igc_resume(struct device *dev)
6533{
6534 struct pci_dev *pdev = to_pci_dev(dev);
6535 struct net_device *netdev = pci_get_drvdata(pdev);
6536 struct igc_adapter *adapter = netdev_priv(netdev);
6537 struct igc_hw *hw = &adapter->hw;
6538 u32 err, val;
6539
6540 pci_set_power_state(pdev, PCI_D0);
6541 pci_restore_state(pdev);
6542 pci_save_state(pdev);
6543
6544 if (!pci_device_is_present(pdev))
6545 return -ENODEV;
6546 err = pci_enable_device_mem(pdev);
6547 if (err) {
25f06eff 6548 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
9513d2a5
SN
6549 return err;
6550 }
6551 pci_set_master(pdev);
6552
6553 pci_enable_wake(pdev, PCI_D3hot, 0);
6554 pci_enable_wake(pdev, PCI_D3cold, 0);
6555
6556 if (igc_init_interrupt_scheme(adapter, true)) {
25f06eff 6557 netdev_err(netdev, "Unable to allocate memory for queues\n");
9513d2a5
SN
6558 return -ENOMEM;
6559 }
6560
6561 igc_reset(adapter);
6562
6563 /* let the f/w know that the h/w is now under the control of the
6564 * driver.
6565 */
6566 igc_get_hw_control(adapter);
6567
6568 val = rd32(IGC_WUS);
6569 if (val & WAKE_PKT_WUS)
6570 igc_deliver_wake_packet(netdev);
6571
6572 wr32(IGC_WUS, ~0);
6573
6574 rtnl_lock();
6575 if (!err && netif_running(netdev))
6576 err = __igc_open(netdev, true);
6577
6578 if (!err)
6579 netif_device_attach(netdev);
6580 rtnl_unlock();
6581
6582 return err;
6583}
6584
6585static int __maybe_unused igc_runtime_resume(struct device *dev)
6586{
6587 return igc_resume(dev);
6588}
6589
6590static int __maybe_unused igc_suspend(struct device *dev)
6591{
6592 return __igc_shutdown(to_pci_dev(dev), NULL, 0);
6593}
6594
6595static int __maybe_unused igc_runtime_idle(struct device *dev)
6596{
6597 struct net_device *netdev = dev_get_drvdata(dev);
6598 struct igc_adapter *adapter = netdev_priv(netdev);
6599
6600 if (!igc_has_link(adapter))
6601 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6602
6603 return -EBUSY;
6604}
6605#endif /* CONFIG_PM */
6606
6607static void igc_shutdown(struct pci_dev *pdev)
6608{
6609 bool wake;
6610
6611 __igc_shutdown(pdev, &wake, 0);
6612
6613 if (system_state == SYSTEM_POWER_OFF) {
6614 pci_wake_from_d3(pdev, wake);
6615 pci_set_power_state(pdev, PCI_D3hot);
6616 }
6617}
6618
bc23aa94
SN
6619/**
6620 * igc_io_error_detected - called when PCI error is detected
6621 * @pdev: Pointer to PCI device
6622 * @state: The current PCI connection state
6623 *
6624 * This function is called after a PCI bus error affecting
6625 * this device has been detected.
6626 **/
6627static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
6628 pci_channel_state_t state)
6629{
6630 struct net_device *netdev = pci_get_drvdata(pdev);
6631 struct igc_adapter *adapter = netdev_priv(netdev);
6632
6633 netif_device_detach(netdev);
6634
6635 if (state == pci_channel_io_perm_failure)
6636 return PCI_ERS_RESULT_DISCONNECT;
6637
6638 if (netif_running(netdev))
6639 igc_down(adapter);
6640 pci_disable_device(pdev);
6641
6642 /* Request a slot reset. */
6643 return PCI_ERS_RESULT_NEED_RESET;
6644}
6645
6646/**
6647 * igc_io_slot_reset - called after the PCI bus has been reset.
6648 * @pdev: Pointer to PCI device
6649 *
6650 * Restart the card from scratch, as if from a cold-boot. Implementation
6651 * resembles the first-half of the igc_resume routine.
6652 **/
6653static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
6654{
6655 struct net_device *netdev = pci_get_drvdata(pdev);
6656 struct igc_adapter *adapter = netdev_priv(netdev);
6657 struct igc_hw *hw = &adapter->hw;
6658 pci_ers_result_t result;
6659
6660 if (pci_enable_device_mem(pdev)) {
25f06eff 6661 netdev_err(netdev, "Could not re-enable PCI device after reset\n");
bc23aa94
SN
6662 result = PCI_ERS_RESULT_DISCONNECT;
6663 } else {
6664 pci_set_master(pdev);
6665 pci_restore_state(pdev);
6666 pci_save_state(pdev);
6667
6668 pci_enable_wake(pdev, PCI_D3hot, 0);
6669 pci_enable_wake(pdev, PCI_D3cold, 0);
6670
6671 /* In case of PCI error, adapter loses its HW address
6672 * so we should re-assign it here.
6673 */
6674 hw->hw_addr = adapter->io_addr;
6675
6676 igc_reset(adapter);
6677 wr32(IGC_WUS, ~0);
6678 result = PCI_ERS_RESULT_RECOVERED;
6679 }
6680
6681 return result;
6682}
6683
6684/**
6685 * igc_io_resume - called when traffic can start to flow again.
6686 * @pdev: Pointer to PCI device
6687 *
6688 * This callback is called when the error recovery driver tells us that
6689 * it's OK to resume normal operation. Implementation resembles the
6690 * second-half of the igc_resume routine.
6691 */
6692static void igc_io_resume(struct pci_dev *pdev)
6693{
6694 struct net_device *netdev = pci_get_drvdata(pdev);
6695 struct igc_adapter *adapter = netdev_priv(netdev);
6696
6697 rtnl_lock();
6698 if (netif_running(netdev)) {
6699 if (igc_open(netdev)) {
25f06eff 6700 netdev_err(netdev, "igc_open failed after reset\n");
bc23aa94
SN
6701 return;
6702 }
6703 }
6704
6705 netif_device_attach(netdev);
6706
6707 /* let the f/w know that the h/w is now under the control of the
6708 * driver.
6709 */
6710 igc_get_hw_control(adapter);
6711 rtnl_unlock();
6712}
6713
6714static const struct pci_error_handlers igc_err_handler = {
6715 .error_detected = igc_io_error_detected,
6716 .slot_reset = igc_io_slot_reset,
6717 .resume = igc_io_resume,
6718};
6719
9513d2a5
SN
6720#ifdef CONFIG_PM
6721static const struct dev_pm_ops igc_pm_ops = {
6722 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
6723 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
6724 igc_runtime_idle)
6725};
6726#endif
6727
d89f8841
SN
6728static struct pci_driver igc_driver = {
6729 .name = igc_driver_name,
6730 .id_table = igc_pci_tbl,
6731 .probe = igc_probe,
6732 .remove = igc_remove,
9513d2a5
SN
6733#ifdef CONFIG_PM
6734 .driver.pm = &igc_pm_ops,
6735#endif
6736 .shutdown = igc_shutdown,
bc23aa94 6737 .err_handler = &igc_err_handler,
d89f8841
SN
6738};
6739
8c5ad0da
SN
6740/**
6741 * igc_reinit_queues - reinitialize the adapter's queues
6742 * @adapter: pointer to adapter structure
6743 */
6744int igc_reinit_queues(struct igc_adapter *adapter)
6745{
6746 struct net_device *netdev = adapter->netdev;
8c5ad0da
SN
6747 int err = 0;
6748
6749 if (netif_running(netdev))
6750 igc_close(netdev);
6751
6752 igc_reset_interrupt_capability(adapter);
6753
6754 if (igc_init_interrupt_scheme(adapter, true)) {
25f06eff 6755 netdev_err(netdev, "Unable to allocate memory for queues\n");
8c5ad0da
SN
6756 return -ENOMEM;
6757 }
6758
6759 if (netif_running(netdev))
6760 err = igc_open(netdev);
6761
6762 return err;
6763}
6764
c0071c7a
SN
6765/**
6766 * igc_get_hw_dev - return device
6767 * @hw: pointer to hardware structure
6768 *
6769 * used by hardware layer to print debugging information
6770 */
6771struct net_device *igc_get_hw_dev(struct igc_hw *hw)
6772{
6773 struct igc_adapter *adapter = hw->back;
6774
6775 return adapter->netdev;
6776}
6777
fc9df2a0
AG
6778static void igc_disable_rx_ring_hw(struct igc_ring *ring)
6779{
6780 struct igc_hw *hw = &ring->q_vector->adapter->hw;
6781 u8 idx = ring->reg_idx;
6782 u32 rxdctl;
6783
6784 rxdctl = rd32(IGC_RXDCTL(idx));
6785 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
6786 rxdctl |= IGC_RXDCTL_SWFLUSH;
6787 wr32(IGC_RXDCTL(idx), rxdctl);
6788}
6789
6790void igc_disable_rx_ring(struct igc_ring *ring)
6791{
6792 igc_disable_rx_ring_hw(ring);
6793 igc_clean_rx_ring(ring);
6794}
6795
6796void igc_enable_rx_ring(struct igc_ring *ring)
6797{
6798 struct igc_adapter *adapter = ring->q_vector->adapter;
6799
6800 igc_configure_rx_ring(adapter, ring);
6801
6802 if (ring->xsk_pool)
6803 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
6804 else
6805 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
6806}
6807
9acf59a7
AG
6808static void igc_disable_tx_ring_hw(struct igc_ring *ring)
6809{
6810 struct igc_hw *hw = &ring->q_vector->adapter->hw;
6811 u8 idx = ring->reg_idx;
6812 u32 txdctl;
6813
6814 txdctl = rd32(IGC_TXDCTL(idx));
6815 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
6816 txdctl |= IGC_TXDCTL_SWFLUSH;
6817 wr32(IGC_TXDCTL(idx), txdctl);
6818}
6819
6820void igc_disable_tx_ring(struct igc_ring *ring)
6821{
6822 igc_disable_tx_ring_hw(ring);
6823 igc_clean_tx_ring(ring);
6824}
6825
6826void igc_enable_tx_ring(struct igc_ring *ring)
6827{
6828 struct igc_adapter *adapter = ring->q_vector->adapter;
6829
6830 igc_configure_tx_ring(adapter, ring);
6831}
6832
d89f8841
SN
6833/**
6834 * igc_init_module - Driver Registration Routine
6835 *
6836 * igc_init_module is the first routine called when the driver is
6837 * loaded. All it does is register with the PCI subsystem.
6838 */
6839static int __init igc_init_module(void)
6840{
6841 int ret;
6842
34a2a3b8 6843 pr_info("%s\n", igc_driver_string);
d89f8841
SN
6844 pr_info("%s\n", igc_copyright);
6845
6846 ret = pci_register_driver(&igc_driver);
6847 return ret;
6848}
6849
6850module_init(igc_init_module);
6851
6852/**
6853 * igc_exit_module - Driver Exit Cleanup Routine
6854 *
6855 * igc_exit_module is called just before the driver is removed
6856 * from memory.
6857 */
6858static void __exit igc_exit_module(void)
6859{
6860 pci_unregister_driver(&igc_driver);
6861}
6862
6863module_exit(igc_exit_module);
6864/* igc_main.c */