/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.34-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

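/*
 * Register layout note, as implied by the arithmetic in ixgbe_set_ivar()
 * below: on 82598 each 32-bit IVAR register packs four 8-bit interrupt-cause
 * entries, so cause N lives in IVAR(N >> 2), byte (N & 0x3); Rx causes occupy
 * indices 0-63 and Tx causes 64-127, which is where "(direction * 64) +
 * queue" comes from.  On 82599 the mapping is per queue pair instead, with
 * the Rx entry in the low and the Tx entry in the high half of each slot.
 */
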
/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	tx_buffer_info->dma = 0;
	if (tx_buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

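/*
 * Worked example for the descriptor-budget macros below: one Tx descriptor
 * carries at most 1 << 14 = 16384 bytes, so TXD_USE_COUNT(S) rounds S up to
 * whole descriptors (a 20000-byte chunk needs two).  DESC_NEEDED is the
 * worst case for a single skb: the skb->data chunk, plus one descriptor per
 * page-sized fragment, plus one for the context descriptor.
 */
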
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

static void ixgbe_tx_timeout(struct net_device *netdev);

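/*
 * Completion model used by the cleanup routine below: each transmitted skb
 * records the index of its last descriptor in next_to_watch.  Once hardware
 * sets the DD (descriptor done) bit in that end-of-packet descriptor's
 * write-back status, every descriptor of the packet can be reclaimed, so
 * the inner loop frees buffers from next_to_clean up to and including eop.
 */
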
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
					       sizeof(struct fc_frame_header) +
					       sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
					            skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
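/*
 * DCA (Direct Cache Access) lets the chipset push descriptor and header
 * writes toward the cache of the CPU that will process them.  The two
 * helpers below retag a ring's Rx/Tx DCA control register whenever the
 * ring is serviced on a different CPU than last time.
 */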
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		}
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

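/*
 * Buffer layout note for the allocator below: in packet-split mode each
 * descriptor gets a small header buffer (bi->dma, carved from an skb) plus
 * a half-page data buffer (bi->page_dma).  Pages are recycled by flipping
 * page_offset between 0 and PAGE_SIZE/2, so one page services two
 * descriptors before it is released.
 */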
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

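/*
 * Worked example: if hardware coalesced three receives A -> B -> C, the rx
 * cleanup loop linked them via skb->prev (C->prev = B, B->prev = A).  The
 * transform below walks back to A, makes A the head, and hangs the rest off
 * A's frag_list so the stack sees one large packet.
 */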
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

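/**
 * ixgbe_clean_rx_irq - Reclaim completed receive descriptors
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx ring to clean
 * @work_done: incremented by the number of packets processed
 * @work_to_do: NAPI budget for this pass
 *
 * Walks the ring while the DD bit is set, unmaps the buffers, stitches
 * multi-descriptor (non-EOP) and RSC receives back together, and hands
 * completed skbs to ixgbe_receive_skb().  Used buffers are returned to
 * hardware in batches of IXGBE_RX_BUFFER_WRITE.
 **/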
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			            IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
			rx_ring->rsc_count += (rsc_count - 1);
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
		      sizeof(struct fc_frame_header) -
		      sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbe_write_eitr(q_vector);
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		               v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

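/*
 * Worked example for the ITR update below: at eitr = 8000 ints/s each
 * timeslice is 1000000/8000 = 125 us, so 50000 bytes handled in one
 * interrupt gives bytes_perint = 400 bytes/us.  That figure is compared
 * against the eitr_low/eitr_high thresholds to step the vector between
 * the 100000/20000/8000 ints/s buckets.
 */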
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which will later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
				                          &adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
				                       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

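/*
 * 82598 exposes a single 32-bit queue-interrupt mask, while 82599 supports
 * up to 64 queue interrupts and splits the mask across the EIMS_EX/EIMC_EX
 * register pair; the two helpers below hide that difference behind a
 * 64-bit qmask.
 */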
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

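/*
 * Budget note for the handler below: the NAPI budget is divided evenly
 * across the vector's rx rings (e.g. a budget of 64 over 4 rings gives
 * 16 packets per ring) and clamped to at least 1, so a pass can never
 * poll zero packets and exit early by accident.
 */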
/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}

/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}

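/*
 * Worked example for the constrained case handled below: with 8 Rx queues
 * and only 4 vectors, each pass computes rqpv = DIV_ROUND_UP(remaining,
 * vectors_left), i.e. ceil(8/4) = 2, ceil(6/3) = 2, ceil(4/2) = 2,
 * ceil(2/1) = 2, so the rings end up spread evenly, two per vector.
 */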
/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "tx", ti++);
		} else
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "TxRx", vector);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  adapter->q_vector[vector]);
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
		         adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

f494e8fa
AV
1611static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1612{
7a921c93 1613 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
f494e8fa
AV
1614 u8 current_itr;
1615 u32 new_itr = q_vector->eitr;
1616 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1617 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1618
30efa5a3 1619 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
b4617240
PW
1620 q_vector->tx_itr,
1621 tx_ring->total_packets,
1622 tx_ring->total_bytes);
30efa5a3 1623 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
b4617240
PW
1624 q_vector->rx_itr,
1625 rx_ring->total_packets,
1626 rx_ring->total_bytes);
f494e8fa 1627
30efa5a3 1628 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
f494e8fa
AV
1629
1630 switch (current_itr) {
1631 /* counts and packets in update_itr are dependent on these numbers */
1632 case lowest_latency:
1633 new_itr = 100000;
1634 break;
1635 case low_latency:
1636 new_itr = 20000; /* aka hwitr = ~200 */
1637 break;
1638 case bulk_latency:
1639 new_itr = 8000;
1640 break;
1641 default:
1642 break;
1643 }
1644
1645 if (new_itr != q_vector->eitr) {
fe49f04a
AD
1646 /* do an exponential smoothing */
1647 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
509ee935
JB
1648
1649 /* save the algorithm value here, not the smoothed one */
1650 q_vector->eitr = new_itr;
fe49f04a
AD
1651
1652 ixgbe_write_eitr(q_vector);
f494e8fa
AV
1653 }
1654
1655 return;
1656}
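The smoothing step above blends 90% of the current EITR value with 10% of the newly computed target, so the interrupt rate drifts toward the target instead of jumping. A standalone sketch of the filter, with illustrative traffic values that are not taken from the driver:

#include <stdio.h>

/* 90/10 exponential smoothing as in ixgbe_set_itr(): the old rate
 * dominates, so a latency-class change ramps in over several calls. */
static unsigned int smooth_itr(unsigned int old_itr, unsigned int target)
{
	return ((old_itr * 90) / 100) + ((target * 10) / 100);
}

int main(void)
{
	unsigned int itr = 8000;	/* bulk_latency starting point */
	int i;

	/* workload switches to lowest_latency (target 100000 ints/sec) */
	for (i = 0; i < 5; i++) {
		itr = smooth_itr(itr, 100000);
		printf("step %d: itr = %u ints/sec\n", i, itr);
	}
	return 0;
}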
1657
79aefa45
AD
1658/**
1659 * ixgbe_irq_enable - Enable default interrupt generation settings
1660 * @adapter: board private structure
1661 **/
1662static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1663{
1664 u32 mask;
835462fc
NS
1665
1666 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
6ab33d51
DM
1667 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1668 mask |= IXGBE_EIMS_GPI_SDP1;
e8e26350 1669 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2a41ff81 1670 mask |= IXGBE_EIMS_ECC;
e8e26350
PW
1671 mask |= IXGBE_EIMS_GPI_SDP1;
1672 mask |= IXGBE_EIMS_GPI_SDP2;
1673 }
c4cf55e5
PWJ
1674 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1675 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
1676 mask |= IXGBE_EIMS_FLOW_DIR;
e8e26350 1677
79aefa45 1678 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
835462fc 1679 ixgbe_irq_enable_queues(adapter, ~0);
79aefa45
AD
1680 IXGBE_WRITE_FLUSH(&adapter->hw);
1681}
021230d4 1682
9a799d71 1683/**
021230d4 1684 * ixgbe_intr - legacy mode Interrupt Handler
9a799d71
AK
1685 * @irq: interrupt number
1686 * @data: pointer to a network interface device structure
9a799d71
AK
1687 **/
1688static irqreturn_t ixgbe_intr(int irq, void *data)
1689{
1690 struct net_device *netdev = data;
1691 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1692 struct ixgbe_hw *hw = &adapter->hw;
7a921c93 1693 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
9a799d71
AK
1694 u32 eicr;
1695
54037505
DS
1696 /*
1697 * Workaround for silicon errata. Mask the interrupts
1698 * before the read of EICR.
1699 */
1700 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1701
021230d4
AV
1702 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
 1703 * therefore no explicit interrupt disable is necessary */
1704 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
f47cf66e
JB
1705 if (!eicr) {
1706 /* shared interrupt alert!
1707 * make sure interrupts are enabled because the read will
1708 * have disabled interrupts due to EIAM */
1709 ixgbe_irq_enable(adapter);
9a799d71 1710 return IRQ_NONE; /* Not our interrupt */
f47cf66e 1711 }
9a799d71 1712
cf8280ee
JB
1713 if (eicr & IXGBE_EICR_LSC)
1714 ixgbe_check_lsc(adapter);
021230d4 1715
e8e26350
PW
1716 if (hw->mac.type == ixgbe_mac_82599EB)
1717 ixgbe_check_sfp_event(adapter, eicr);
1718
0befdb3e
JB
1719 ixgbe_check_fan_failure(adapter, eicr);
1720
7a921c93 1721 if (napi_schedule_prep(&(q_vector->napi))) {
f494e8fa
AV
1722 adapter->tx_ring[0].total_packets = 0;
1723 adapter->tx_ring[0].total_bytes = 0;
1724 adapter->rx_ring[0].total_packets = 0;
1725 adapter->rx_ring[0].total_bytes = 0;
021230d4 1726 /* would disable interrupts here but EIAM disabled it */
7a921c93 1727 __napi_schedule(&(q_vector->napi));
9a799d71
AK
1728 }
1729
1730 return IRQ_HANDLED;
1731}
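Because EICR is read-to-clear and EIAM auto-masks on that read, a zero read means the shared line fired for some other device, and the handler must undo the auto-mask before returning IRQ_NONE, exactly as above. A skeletal restatement of that contract; the mmio helpers are hypothetical stand-ins, not the driver's API:

#include <linux/types.h>
#include <linux/interrupt.h>

extern u32 my_read_icr(void *hw);	/* hypothetical read-to-clear read */
extern void my_irq_enable(void *hw);	/* hypothetical re-enable */

static irqreturn_t my_legacy_intr(int irq, void *data)
{
	void *hw = data;
	u32 icr = my_read_icr(hw);	/* reading also auto-masks */

	if (!icr) {
		/* shared line, not ours: re-enable what the read masked */
		my_irq_enable(hw);
		return IRQ_NONE;
	}

	/* ... service the causes recorded in icr ... */
	return IRQ_HANDLED;
}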
1732
021230d4
AV
1733static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1734{
1735 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1736
1737 for (i = 0; i < q_vectors; i++) {
7a921c93 1738 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
021230d4
AV
1739 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1740 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1741 q_vector->rxr_count = 0;
1742 q_vector->txr_count = 0;
1743 }
1744}
1745
9a799d71
AK
1746/**
1747 * ixgbe_request_irq - initialize interrupts
1748 * @adapter: board private structure
1749 *
1750 * Attempts to configure interrupts using the best available
1751 * capabilities of the hardware and kernel.
1752 **/
021230d4 1753static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
9a799d71
AK
1754{
1755 struct net_device *netdev = adapter->netdev;
021230d4 1756 int err;
9a799d71 1757
021230d4
AV
1758 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1759 err = ixgbe_request_msix_irqs(adapter);
1760 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1761 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
b4617240 1762 netdev->name, netdev);
021230d4
AV
1763 } else {
1764 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
b4617240 1765 netdev->name, netdev);
9a799d71
AK
1766 }
1767
9a799d71
AK
1768 if (err)
1769 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1770
9a799d71
AK
1771 return err;
1772}
1773
1774static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1775{
1776 struct net_device *netdev = adapter->netdev;
1777
1778 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
021230d4 1779 int i, q_vectors;
9a799d71 1780
021230d4
AV
1781 q_vectors = adapter->num_msix_vectors;
1782
1783 i = q_vectors - 1;
9a799d71 1784 free_irq(adapter->msix_entries[i].vector, netdev);
9a799d71 1785
021230d4
AV
1786 i--;
1787 for (; i >= 0; i--) {
1788 free_irq(adapter->msix_entries[i].vector,
7a921c93 1789 adapter->q_vector[i]);
021230d4
AV
1790 }
1791
1792 ixgbe_reset_q_vectors(adapter);
1793 } else {
1794 free_irq(adapter->pdev->irq, netdev);
9a799d71
AK
1795 }
1796}
1797
22d5a71b
JB
1798/**
1799 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1800 * @adapter: board private structure
1801 **/
1802static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1803{
835462fc
NS
1804 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1806 } else {
1807 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1808 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
22d5a71b 1809 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
22d5a71b
JB
1810 }
1811 IXGBE_WRITE_FLUSH(&adapter->hw);
1812 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1813 int i;
1814 for (i = 0; i < adapter->num_msix_vectors; i++)
1815 synchronize_irq(adapter->msix_entries[i].vector);
1816 } else {
1817 synchronize_irq(adapter->pdev->irq);
1818 }
1819}
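Masking through EIMC (split across EIMC_EX(0) and EIMC_EX(1) on 82599, which has more than 32 interrupt causes) only stops new interrupts; the synchronize_irq() calls then wait out any handler still running on another CPU, after which the caller may tear down freely. The ordering, condensed around a hypothetical mask helper:

#include <linux/interrupt.h>

extern void my_mask_all(void *hw);	/* hypothetical EIMC write(s) */

/* Quiesce: mask in hardware first, then wait for in-flight handlers. */
static void quiesce_irqs(void *hw, unsigned int *irqs, int nr_irqs)
{
	int i;

	my_mask_all(hw);
	for (i = 0; i < nr_irqs; i++)
		synchronize_irq(irqs[i]);
}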
1820
9a799d71
AK
1821/**
1822 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1823 *
1824 **/
1825static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1826{
9a799d71
AK
1827 struct ixgbe_hw *hw = &adapter->hw;
1828
021230d4 1829 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
30efa5a3 1830 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
9a799d71 1831
e8e26350
PW
1832 ixgbe_set_ivar(adapter, 0, 0, 0);
1833 ixgbe_set_ivar(adapter, 1, 0, 0);
021230d4
AV
1834
1835 map_vector_to_rxq(adapter, 0, 0);
1836 map_vector_to_txq(adapter, 0, 0);
1837
1838 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
9a799d71
AK
1839}
1840
1841/**
3a581073 1842 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
9a799d71
AK
1843 * @adapter: board private structure
1844 *
1845 * Configure the Tx unit of the MAC after a reset.
1846 **/
1847static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1848{
12207e49 1849 u64 tdba;
9a799d71 1850 struct ixgbe_hw *hw = &adapter->hw;
021230d4 1851 u32 i, j, tdlen, txctrl;
9a799d71
AK
1852
1853 /* Setup the HW Tx Head and Tail descriptor pointers */
1854 for (i = 0; i < adapter->num_tx_queues; i++) {
e01c31a5
JB
1855 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1856 j = ring->reg_idx;
1857 tdba = ring->dma;
1858 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
021230d4 1859 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
284901a9 1860 (tdba & DMA_BIT_MASK(32)));
021230d4
AV
1861 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1862 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1863 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1864 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1865 adapter->tx_ring[i].head = IXGBE_TDH(j);
1866 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1867 /* Disable Tx Head Writeback RO bit, since this hoses
1868 * bookkeeping if things aren't delivered in order.
1869 */
e01c31a5 1870 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
021230d4 1871 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
e01c31a5 1872 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
9a799d71 1873 }
e8e26350
PW
1874 if (hw->mac.type == ixgbe_mac_82599EB) {
1875 /* We enable 8 traffic classes, DCB only */
1876 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1877 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
1878 IXGBE_MTQC_8TC_8TQ));
1879 }
9a799d71
AK
1880}
1881
e8e26350 1882#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
cc41ac7c
JB
1883
1884static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1885{
1886 struct ixgbe_ring *rx_ring;
1887 u32 srrctl;
e8e26350 1888 int queue0 = 0;
3be1adfb 1889 unsigned long mask;
0cefafad 1890 struct ixgbe_ring_feature *feature = adapter->ring_feature;
3be1adfb 1891
e8e26350 1892 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
163de42e 1893 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
0cefafad 1894 int dcb_i = feature[RING_F_DCB].indices;
163de42e
AD
1895 if (dcb_i == 8)
1896 queue0 = index >> 4;
1897 else if (dcb_i == 4)
1898 queue0 = index >> 5;
1899 else
1900 dev_err(&adapter->pdev->dev, "Invalid DCB "
1901 "configuration\n");
0331a832
YZ
1902#ifdef IXGBE_FCOE
1903 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
1904 struct ixgbe_ring_feature *f;
1905
1906 rx_ring = &adapter->rx_ring[queue0];
1907 f = &adapter->ring_feature[RING_F_FCOE];
1908 if ((queue0 == 0) && (index > rx_ring->reg_idx))
1909 queue0 = f->mask + index -
1910 rx_ring->reg_idx - 1;
1911 }
1912#endif /* IXGBE_FCOE */
163de42e
AD
1913 } else {
1914 queue0 = index;
1915 }
cc41ac7c 1916 } else {
0cefafad 1917 mask = (unsigned long) feature[RING_F_RSS].mask;
3be1adfb
AD
1918 queue0 = index & mask;
1919 index = index & mask;
cc41ac7c 1920 }
3be1adfb 1921
cc41ac7c
JB
1922 rx_ring = &adapter->rx_ring[queue0];
1923
1924 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1925
1926 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1927 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1928
afafd5b0
AD
1929 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1930 IXGBE_SRRCTL_BSIZEHDR_MASK;
1931
cc41ac7c 1932 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
afafd5b0
AD
1933#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
1934 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1935#else
1936 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1937#endif
cc41ac7c 1938 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
cc41ac7c 1939 } else {
afafd5b0
AD
1940 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1941 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
cc41ac7c 1942 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
cc41ac7c 1943 }
e8e26350 1944
cc41ac7c
JB
1945 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1946}
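SRRCTL encodes buffer sizes in coarse units: BSIZEPKT counts 1 KB steps (hence the ALIGN to 1024 before shifting by IXGBE_SRRCTL_BSIZEPKT_SHIFT, which this driver defines as 10), while BSIZEHDR is positioned by the shift of 2 defined above. A worked encoding under those assumptions:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define BSIZEPKT_SHIFT	10	/* assumed: field counts 1 KB units */

int main(void)
{
	unsigned int rx_buf_len = 1522;	/* e.g. MAXIMUM_ETHERNET_VLAN_SIZE */
	unsigned int field = ALIGN_UP(rx_buf_len, 1024) >> BSIZEPKT_SHIFT;

	/* 1522 rounds up to 2048 bytes -> field value 2 (2 KB buffers) */
	printf("BSIZEPKT field = %u\n", field);
	return 0;
}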
9a799d71 1947
0cefafad
JB
1948static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
1949{
1950 u32 mrqc = 0;
1951 int mask;
1952
1953 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
1954 return mrqc;
1955
1956 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
1957#ifdef CONFIG_IXGBE_DCB
1958 | IXGBE_FLAG_DCB_ENABLED
1959#endif
1960 );
1961
1962 switch (mask) {
1963 case (IXGBE_FLAG_RSS_ENABLED):
1964 mrqc = IXGBE_MRQC_RSSEN;
1965 break;
1966#ifdef CONFIG_IXGBE_DCB
1967 case (IXGBE_FLAG_DCB_ENABLED):
1968 mrqc = IXGBE_MRQC_RT8TCEN;
1969 break;
1970#endif /* CONFIG_IXGBE_DCB */
1971 default:
1972 break;
1973 }
1974
1975 return mrqc;
1976}
1977
9a799d71 1978/**
3a581073 1979 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
9a799d71
AK
1980 * @adapter: board private structure
1981 *
1982 * Configure the Rx unit of the MAC after a reset.
1983 **/
1984static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1985{
1986 u64 rdba;
1987 struct ixgbe_hw *hw = &adapter->hw;
1988 struct net_device *netdev = adapter->netdev;
1989 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
021230d4 1990 int i, j;
9a799d71 1991 u32 rdlen, rxctrl, rxcsum;
7c6e0a43
JB
1992 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1993 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1994 0x6A3E67EA, 0x14364D17, 0x3BED200D};
9a799d71 1995 u32 fctrl, hlreg0;
509ee935 1996 u32 reta = 0, mrqc = 0;
cc41ac7c 1997 u32 rdrxctl;
f8212f97 1998 u32 rscctrl;
7c6e0a43 1999 int rx_buf_len;
9a799d71
AK
2000
2001 /* Decide whether to use packet split mode or not */
762f4c57 2002 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
9a799d71 2003
eacd73f7
YZ
2004#ifdef IXGBE_FCOE
2005 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2006 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
2007#endif /* IXGBE_FCOE */
2008
9a799d71
AK
2009 /* Set the RX buffer length according to the mode */
2010 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
7c6e0a43 2011 rx_buf_len = IXGBE_RX_HDR_SIZE;
e8e26350
PW
2012 if (hw->mac.type == ixgbe_mac_82599EB) {
2013 /* PSRTYPE must be initialized in 82599 */
2014 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2015 IXGBE_PSRTYPE_UDPHDR |
2016 IXGBE_PSRTYPE_IPV4HDR |
dfa12f05
YZ
2017 IXGBE_PSRTYPE_IPV6HDR |
2018 IXGBE_PSRTYPE_L2HDR;
e8e26350
PW
2019 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2020 }
9a799d71 2021 } else {
df647b5c 2022 if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
f8212f97 2023 (netdev->mtu <= ETH_DATA_LEN))
7c6e0a43 2024 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
9a799d71 2025 else
7c6e0a43 2026 rx_buf_len = ALIGN(max_frame, 1024);
9a799d71
AK
2027 }
2028
2029 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2030 fctrl |= IXGBE_FCTRL_BAM;
021230d4 2031 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
e8e26350 2032 fctrl |= IXGBE_FCTRL_PMCF;
9a799d71
AK
2033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2034
2035 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2036 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2037 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
2038 else
2039 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
63f39bd1
YZ
2040#ifdef IXGBE_FCOE
2041 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2042 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2043#endif
9a799d71
AK
2044 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2045
9a799d71
AK
2046 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
2047 /* disable receives while setting up the descriptors */
2048 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2049 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2050
0cefafad
JB
2051 /*
2052 * Setup the HW Rx Head and Tail Descriptor Pointers and
2053 * the Base and Length of the Rx Descriptor Ring
2054 */
9a799d71
AK
2055 for (i = 0; i < adapter->num_rx_queues; i++) {
2056 rdba = adapter->rx_ring[i].dma;
7c6e0a43 2057 j = adapter->rx_ring[i].reg_idx;
284901a9 2058 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
7c6e0a43
JB
2059 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2060 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2061 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2062 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2063 adapter->rx_ring[i].head = IXGBE_RDH(j);
2064 adapter->rx_ring[i].tail = IXGBE_RDT(j);
2065 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
cc41ac7c 2066
63f39bd1
YZ
2067#ifdef IXGBE_FCOE
2068 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
2069 struct ixgbe_ring_feature *f;
2070 f = &adapter->ring_feature[RING_F_FCOE];
2071 if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
2072 (i >= f->mask) && (i < f->mask + f->indices))
2073 adapter->rx_ring[i].rx_buf_len =
2074 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2075 }
2076
2077#endif /* IXGBE_FCOE */
cc41ac7c 2078 ixgbe_configure_srrctl(adapter, j);
9a799d71
AK
2079 }
2080
e8e26350
PW
2081 if (hw->mac.type == ixgbe_mac_82598EB) {
2082 /*
2083 * For VMDq support of different descriptor types or
2084 * buffer sizes through the use of multiple SRRCTL
2085 * registers, RDRXCTL.MVMEN must be set to 1
2086 *
2087 * also, the manual doesn't mention it clearly but DCA hints
2088 * will only use queue 0's tags unless this bit is set. Side
2089 * effects of setting this bit are only that SRRCTL must be
2090 * fully programmed [0..15]
2091 */
2a41ff81
JB
2092 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2093 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2094 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2f90b865 2095 }
177db6ff 2096
e8e26350 2097 /* Program MRQC for the distribution of queues */
0cefafad 2098 mrqc = ixgbe_setup_mrqc(adapter);
e8e26350 2099
021230d4 2100 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
9a799d71 2101 /* Fill out redirection table */
021230d4
AV
2102 for (i = 0, j = 0; i < 128; i++, j++) {
2103 if (j == adapter->ring_feature[RING_F_RSS].indices)
2104 j = 0;
 2105 /* reta = 4-byte sliding window of queue indices, one byte per
 2106 * entry: 0x00, 0x11, ... (indices-1)*0x11, then wrapping to 0x00 */
2107 reta = (reta << 8) | (j * 0x11);
2108 if ((i & 3) == 3)
2109 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
9a799d71
AK
2110 }
2111
2112 /* Fill out hash function seeds */
2113 for (i = 0; i < 10; i++)
7c6e0a43 2114 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
9a799d71 2115
2a41ff81
JB
2116 if (hw->mac.type == ixgbe_mac_82598EB)
2117 mrqc |= IXGBE_MRQC_RSSEN;
9a799d71 2118 /* Perform hash on these packet types */
2a41ff81
JB
2119 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2120 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2121 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2122 | IXGBE_MRQC_RSS_FIELD_IPV6
2123 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2124 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
021230d4 2125 }
2a41ff81 2126 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
9a799d71 2127
021230d4
AV
2128 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2129
2130 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
2131 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
 2132 /* Disable checksum indication in the descriptor; this
 2133 * enables the RSS hash */
9a799d71 2134 rxcsum |= IXGBE_RXCSUM_PCSD;
9a799d71 2135 }
021230d4
AV
2136 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2137 /* Enable IPv4 payload checksum for UDP fragments
2138 * if PCSD is not set */
2139 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2140 }
2141
2142 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
e8e26350
PW
2143
2144 if (hw->mac.type == ixgbe_mac_82599EB) {
2145 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2146 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
f8212f97 2147 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
e8e26350
PW
2148 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2149 }
f8212f97 2150
df647b5c 2151 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
f8212f97
AD
2152 /* Enable 82599 HW-RSC */
2153 for (i = 0; i < adapter->num_rx_queues; i++) {
2154 j = adapter->rx_ring[i].reg_idx;
2155 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2156 rscctrl |= IXGBE_RSCCTL_RSCEN;
2157 /*
e76678dd
AD
2158 * we must limit the number of descriptors so that the
2159 * total size of max desc * buf_len is not greater
2160 * than 65535
f8212f97 2161 */
e76678dd
AD
2162 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2163#if (MAX_SKB_FRAGS > 16)
2164 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2165#elif (MAX_SKB_FRAGS > 8)
f8212f97 2166 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
e76678dd
AD
2167#elif (MAX_SKB_FRAGS > 4)
2168 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
f8212f97 2169#else
e76678dd 2170 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
f8212f97 2171#endif
e76678dd
AD
2172 } else {
2173 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2174 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2175 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2176 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2177 else
2178 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2179 }
f8212f97
AD
2180 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2181 }
2182 /* Disable RSC for ACK packets */
2183 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2184 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2185 }
9a799d71
AK
2186}
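The redirection-table loop above packs four one-byte entries into each 32-bit RETA register, flushing one register every fourth iteration; multiplying the queue index by 0x11 mirrors it into both nibbles of the byte. A standalone sketch of the packing, shortened to 16 entries instead of 128:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta = 0;
	int indices = 4;	/* RSS queue count, illustrative */
	int i, j;

	for (i = 0, j = 0; i < 16; i++, j++) {
		if (j == indices)
			j = 0;
		reta = (reta << 8) | (uint32_t)(j * 0x11);
		if ((i & 3) == 3)	/* four entries -> one register */
			printf("RETA[%d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}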
2187
068c89b0
DS
2188static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2189{
2190 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2191 struct ixgbe_hw *hw = &adapter->hw;
2192
2193 /* add VID to filter table */
2194 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
2195}
2196
2197static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2198{
2199 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2200 struct ixgbe_hw *hw = &adapter->hw;
2201
2202 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2203 ixgbe_irq_disable(adapter);
2204
2205 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2206
2207 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2208 ixgbe_irq_enable(adapter);
2209
2210 /* remove VID from filter table */
2211 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
2212}
2213
9a799d71 2214static void ixgbe_vlan_rx_register(struct net_device *netdev,
b4617240 2215 struct vlan_group *grp)
9a799d71
AK
2216{
2217 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2218 u32 ctrl;
e8e26350 2219 int i, j;
9a799d71 2220
d4f80882
AV
2221 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2222 ixgbe_irq_disable(adapter);
9a799d71
AK
2223 adapter->vlgrp = grp;
2224
2f90b865
AD
2225 /*
2226 * For a DCB driver, always enable VLAN tag stripping so we can
2227 * still receive traffic from a DCB-enabled host even if we're
2228 * not in DCB mode.
2229 */
2230 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
e8e26350
PW
2231 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2232 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2233 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2234 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2235 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2236 ctrl |= IXGBE_VLNCTRL_VFE;
9a799d71
AK
2237 /* enable VLAN tag insert/strip */
2238 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
9a799d71
AK
2239 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2240 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
e8e26350
PW
2241 for (i = 0; i < adapter->num_rx_queues; i++) {
2242 j = adapter->rx_ring[i].reg_idx;
2243 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2244 ctrl |= IXGBE_RXDCTL_VME;
2245 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2246 }
9a799d71 2247 }
e8e26350 2248 ixgbe_vlan_rx_add_vid(netdev, 0);
9a799d71 2249
d4f80882
AV
2250 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2251 ixgbe_irq_enable(adapter);
9a799d71
AK
2252}
2253
9a799d71
AK
2254static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2255{
2256 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2257
2258 if (adapter->vlgrp) {
2259 u16 vid;
2260 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2261 if (!vlan_group_get_device(adapter->vlgrp, vid))
2262 continue;
2263 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2264 }
2265 }
2266}
2267
2c5645cf
CL
2268static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2269{
2270 struct dev_mc_list *mc_ptr;
2271 u8 *addr = *mc_addr_ptr;
2272 *vmdq = 0;
2273
2274 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2275 if (mc_ptr->next)
2276 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2277 else
2278 *mc_addr_ptr = NULL;
2279
2280 return addr;
2281}
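ixgbe_addr_list_itr recovers the enclosing dev_mc_list from a pointer to its dmi_addr member via container_of(), then advances the caller's cursor to the next node's address, so the shared MAC code can walk the list without knowing its node layout. A minimal userspace imitation of the trick, with a hypothetical node type standing in for dev_mc_list:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {			/* hypothetical stand-in for dev_mc_list */
	struct node *next;
	unsigned char addr[6];
};

/* Return the current address and step *cursor to the next node's. */
static unsigned char *addr_itr(unsigned char **cursor)
{
	unsigned char *addr = *cursor;
	struct node *n = container_of(addr, struct node, addr[0]);

	*cursor = n->next ? n->next->addr : NULL;
	return addr;
}

int main(void)
{
	struct node b = { NULL, { 2 } };
	struct node a = { &b, { 1 } };
	unsigned char *cur = a.addr;

	while (cur)
		printf("addr[0] = %u\n", addr_itr(&cur)[0]);
	return 0;
}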
2282
9a799d71 2283/**
2c5645cf 2284 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
9a799d71
AK
2285 * @netdev: network interface device structure
2286 *
2c5645cf
CL
 2287 * The set_rx_mode entry point is called whenever the unicast/multicast
2288 * address list or the network interface flags are updated. This routine is
2289 * responsible for configuring the hardware for proper unicast, multicast and
2290 * promiscuous mode.
9a799d71 2291 **/
2c5645cf 2292static void ixgbe_set_rx_mode(struct net_device *netdev)
9a799d71
AK
2293{
2294 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2295 struct ixgbe_hw *hw = &adapter->hw;
3d01625a 2296 u32 fctrl, vlnctrl;
2c5645cf
CL
2297 u8 *addr_list = NULL;
2298 int addr_count = 0;
9a799d71
AK
2299
2300 /* Check for Promiscuous and All Multicast modes */
2301
2302 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3d01625a 2303 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
9a799d71
AK
2304
2305 if (netdev->flags & IFF_PROMISC) {
2c5645cf 2306 hw->addr_ctrl.user_set_promisc = 1;
9a799d71 2307 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3d01625a 2308 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
9a799d71 2309 } else {
746b9f02
PM
2310 if (netdev->flags & IFF_ALLMULTI) {
2311 fctrl |= IXGBE_FCTRL_MPE;
2312 fctrl &= ~IXGBE_FCTRL_UPE;
2313 } else {
2314 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2315 }
3d01625a 2316 vlnctrl |= IXGBE_VLNCTRL_VFE;
2c5645cf 2317 hw->addr_ctrl.user_set_promisc = 0;
9a799d71
AK
2318 }
2319
2320 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3d01625a 2321 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
9a799d71 2322
2c5645cf 2323 /* reprogram secondary unicast list */
ccffad25 2324 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
9a799d71 2325
2c5645cf
CL
2326 /* reprogram multicast list */
2327 addr_count = netdev->mc_count;
2328 if (addr_count)
2329 addr_list = netdev->mc_list->dmi_addr;
c44ade9e
JB
2330 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2331 ixgbe_addr_list_itr);
9a799d71
AK
2332}
2333
021230d4
AV
2334static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2335{
2336 int q_idx;
2337 struct ixgbe_q_vector *q_vector;
2338 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2339
2340 /* legacy and MSI only use one vector */
2341 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2342 q_vectors = 1;
2343
2344 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
f0848276 2345 struct napi_struct *napi;
7a921c93 2346 q_vector = adapter->q_vector[q_idx];
f0848276 2347 napi = &q_vector->napi;
91281fd3
AD
2348 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2349 if (!q_vector->rxr_count || !q_vector->txr_count) {
2350 if (q_vector->txr_count == 1)
2351 napi->poll = &ixgbe_clean_txonly;
2352 else if (q_vector->rxr_count == 1)
2353 napi->poll = &ixgbe_clean_rxonly;
2354 }
2355 }
f0848276
JB
2356
2357 napi_enable(napi);
021230d4
AV
2358 }
2359}
2360
2361static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2362{
2363 int q_idx;
2364 struct ixgbe_q_vector *q_vector;
2365 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2366
2367 /* legacy and MSI only use one vector */
2368 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2369 q_vectors = 1;
2370
2371 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7a921c93 2372 q_vector = adapter->q_vector[q_idx];
021230d4
AV
2373 napi_disable(&q_vector->napi);
2374 }
2375}
2376
7a6b6f51 2377#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
2378/*
2379 * ixgbe_configure_dcb - Configure DCB hardware
2380 * @adapter: ixgbe adapter struct
2381 *
2382 * This is called by the driver on open to configure the DCB hardware.
2383 * This is also called by the gennetlink interface when reconfiguring
2384 * the DCB state.
2385 */
2386static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2387{
2388 struct ixgbe_hw *hw = &adapter->hw;
2389 u32 txdctl, vlnctrl;
2390 int i, j;
2391
2392 ixgbe_dcb_check_config(&adapter->dcb_cfg);
2393 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
2394 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
2395
2396 /* reconfigure the hardware */
2397 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2398
2399 for (i = 0; i < adapter->num_tx_queues; i++) {
2400 j = adapter->tx_ring[i].reg_idx;
2401 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2402 /* PThresh workaround for Tx hang with DFP enabled. */
2403 txdctl |= 32;
2404 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2405 }
2406 /* Enable VLAN tag insert/strip */
2407 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
e8e26350
PW
2408 if (hw->mac.type == ixgbe_mac_82598EB) {
2409 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2410 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2411 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2412 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2413 vlnctrl |= IXGBE_VLNCTRL_VFE;
2414 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2415 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2416 for (i = 0; i < adapter->num_rx_queues; i++) {
2417 j = adapter->rx_ring[i].reg_idx;
2418 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2419 vlnctrl |= IXGBE_RXDCTL_VME;
2420 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2421 }
2422 }
2f90b865
AD
2423 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2424}
2425
2426#endif
9a799d71
AK
2427static void ixgbe_configure(struct ixgbe_adapter *adapter)
2428{
2429 struct net_device *netdev = adapter->netdev;
c4cf55e5 2430 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
2431 int i;
2432
2c5645cf 2433 ixgbe_set_rx_mode(netdev);
9a799d71
AK
2434
2435 ixgbe_restore_vlan(adapter);
7a6b6f51 2436#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
2437 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2438 netif_set_gso_max_size(netdev, 32768);
2439 ixgbe_configure_dcb(adapter);
2440 } else {
2441 netif_set_gso_max_size(netdev, 65536);
2442 }
2443#else
2444 netif_set_gso_max_size(netdev, 65536);
2445#endif
9a799d71 2446
eacd73f7
YZ
2447#ifdef IXGBE_FCOE
2448 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2449 ixgbe_configure_fcoe(adapter);
2450
2451#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
2452 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2453 for (i = 0; i < adapter->num_tx_queues; i++)
2454 adapter->tx_ring[i].atr_sample_rate =
2455 adapter->atr_sample_rate;
2456 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2457 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2458 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2459 }
2460
9a799d71
AK
2461 ixgbe_configure_tx(adapter);
2462 ixgbe_configure_rx(adapter);
2463 for (i = 0; i < adapter->num_rx_queues; i++)
2464 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
b4617240 2465 (adapter->rx_ring[i].count - 1));
9a799d71
AK
2466}
2467
e8e26350
PW
2468static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2469{
2470 switch (hw->phy.type) {
2471 case ixgbe_phy_sfp_avago:
2472 case ixgbe_phy_sfp_ftl:
2473 case ixgbe_phy_sfp_intel:
2474 case ixgbe_phy_sfp_unknown:
2475 case ixgbe_phy_tw_tyco:
2476 case ixgbe_phy_tw_unknown:
2477 return true;
2478 default:
2479 return false;
2480 }
2481}
2482
0ecc061d 2483/**
e8e26350
PW
2484 * ixgbe_sfp_link_config - set up SFP+ link
2485 * @adapter: pointer to private adapter struct
2486 **/
2487static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2488{
2489 struct ixgbe_hw *hw = &adapter->hw;
2490
2491 if (hw->phy.multispeed_fiber) {
2492 /*
2493 * In multispeed fiber setups, the device may not have
2494 * had a physical connection when the driver loaded.
2495 * If that's the case, the initial link configuration
2496 * couldn't get the MAC into 10G or 1G mode, so we'll
2497 * never have a link status change interrupt fire.
2498 * We need to try and force an autonegotiation
2499 * session, then bring up link.
2500 */
2501 hw->mac.ops.setup_sfp(hw);
2502 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2503 schedule_work(&adapter->multispeed_fiber_task);
2504 } else {
2505 /*
2506 * Direct Attach Cu and non-multispeed fiber modules
2507 * still need to be configured properly prior to
2508 * attempting link.
2509 */
2510 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2511 schedule_work(&adapter->sfp_config_module_task);
2512 }
2513}
2514
2515/**
2516 * ixgbe_non_sfp_link_config - set up non-SFP+ link
0ecc061d
PWJ
2517 * @hw: pointer to private hardware struct
2518 *
2519 * Returns 0 on success, negative on failure
2520 **/
e8e26350 2521static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
0ecc061d
PWJ
2522{
2523 u32 autoneg;
2524 bool link_up = false;
2525 u32 ret = IXGBE_ERR_LINK_SETUP;
2526
2527 if (hw->mac.ops.check_link)
2528 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2529
2530 if (ret)
2531 goto link_cfg_out;
2532
2533 if (hw->mac.ops.get_link_capabilities)
2534 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
2535 &hw->mac.autoneg);
2536 if (ret)
2537 goto link_cfg_out;
2538
2539 if (hw->mac.ops.setup_link_speed)
2540 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
0ecc061d
PWJ
2541link_cfg_out:
2542 return ret;
2543}
2544
e8e26350
PW
2545#define IXGBE_MAX_RX_DESC_POLL 10
2546static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2547 int rxr)
2548{
2549 int j = adapter->rx_ring[rxr].reg_idx;
2550 int k;
2551
2552 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2553 if (IXGBE_READ_REG(&adapter->hw,
2554 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2555 break;
2556 else
2557 msleep(1);
2558 }
2559 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2560 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2561 "not set within the polling period\n", rxr);
2562 }
2563 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2564 (adapter->rx_ring[rxr].count - 1));
2565}
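ixgbe_rx_desc_queue_enable polls RXDCTL.ENABLE up to IXGBE_MAX_RX_DESC_POLL times with a 1 ms sleep between reads and merely warns on timeout. The generic poll-until-set idiom it uses, condensed around hypothetical helpers:

extern unsigned int my_read_reg(void *hw, int reg);	/* hypothetical */
extern void my_sleep_ms(int ms);			/* hypothetical */

/* Poll 'reg' until 'bit' is set, at most 'tries' times; 0 on success. */
static int poll_bit_set(void *hw, int reg, unsigned int bit, int tries)
{
	int k;

	for (k = 0; k < tries; k++) {
		if (my_read_reg(hw, reg) & bit)
			return 0;
		my_sleep_ms(1);
	}
	return -1;	/* caller decides whether the timeout is fatal */
}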
2566
9a799d71
AK
2567static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2568{
2569 struct net_device *netdev = adapter->netdev;
9a799d71 2570 struct ixgbe_hw *hw = &adapter->hw;
021230d4 2571 int i, j = 0;
e8e26350 2572 int num_rx_rings = adapter->num_rx_queues;
0ecc061d 2573 int err;
9a799d71 2574 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
021230d4 2575 u32 txdctl, rxdctl, mhadd;
e8e26350 2576 u32 dmatxctl;
021230d4 2577 u32 gpie;
9a799d71 2578
5eba3699
AV
2579 ixgbe_get_hw_control(adapter);
2580
021230d4
AV
2581 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2582 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
9a799d71
AK
2583 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2584 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
b4617240 2585 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
9a799d71
AK
2586 } else {
2587 /* MSI only */
021230d4 2588 gpie = 0;
9a799d71 2589 }
021230d4
AV
2590 /* XXX: to interrupt immediately for EICS writes, enable this */
2591 /* gpie |= IXGBE_GPIE_EIMEN; */
2592 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
9a799d71
AK
2593 }
2594
021230d4
AV
2595 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2596 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2597 * specifically only auto mask tx and rx interrupts */
2598 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2599 }
9a799d71 2600
0befdb3e
JB
2601 /* Enable fan failure interrupt if media type is copper */
2602 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2603 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2604 gpie |= IXGBE_SDP1_GPIEN;
2605 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2606 }
2607
e8e26350
PW
2608 if (hw->mac.type == ixgbe_mac_82599EB) {
2609 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2610 gpie |= IXGBE_SDP1_GPIEN;
2611 gpie |= IXGBE_SDP2_GPIEN;
2612 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2613 }
2614
63f39bd1
YZ
2615#ifdef IXGBE_FCOE
2616 /* adjust max frame to be able to do baby jumbo for FCoE */
2617 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2618 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2619 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2620
2621#endif /* IXGBE_FCOE */
021230d4 2622 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
9a799d71
AK
2623 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2624 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2625 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2626
2627 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2628 }
2629
2630 for (i = 0; i < adapter->num_tx_queues; i++) {
021230d4
AV
2631 j = adapter->tx_ring[i].reg_idx;
2632 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
e01c31a5
JB
2633 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2634 txdctl |= (8 << 16);
e8e26350
PW
2635 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2636 }
2637
2638 if (hw->mac.type == ixgbe_mac_82599EB) {
2639 /* DMATXCTL.EN must be set after all Tx queue config is done */
2640 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2641 dmatxctl |= IXGBE_DMATXCTL_TE;
2642 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2643 }
2644 for (i = 0; i < adapter->num_tx_queues; i++) {
2645 j = adapter->tx_ring[i].reg_idx;
2646 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
9a799d71 2647 txdctl |= IXGBE_TXDCTL_ENABLE;
021230d4 2648 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
9a799d71
AK
2649 }
2650
e8e26350 2651 for (i = 0; i < num_rx_rings; i++) {
021230d4
AV
2652 j = adapter->rx_ring[i].reg_idx;
2653 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2654 /* enable PTHRESH=32 descriptors (half the internal cache)
2655 * and HTHRESH=0 descriptors (to minimize latency on fetch),
2656 * this also removes a pesky rx_no_buffer_count increment */
2657 rxdctl |= 0x0020;
9a799d71 2658 rxdctl |= IXGBE_RXDCTL_ENABLE;
021230d4 2659 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
e8e26350
PW
2660 if (hw->mac.type == ixgbe_mac_82599EB)
2661 ixgbe_rx_desc_queue_enable(adapter, i);
9a799d71
AK
2662 }
2663 /* enable all receives */
2664 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
e8e26350
PW
2665 if (hw->mac.type == ixgbe_mac_82598EB)
2666 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2667 else
2668 rxdctl |= IXGBE_RXCTRL_RXEN;
2669 hw->mac.ops.enable_rx_dma(hw, rxdctl);
9a799d71
AK
2670
2671 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2672 ixgbe_configure_msix(adapter);
2673 else
2674 ixgbe_configure_msi_and_legacy(adapter);
2675
2676 clear_bit(__IXGBE_DOWN, &adapter->state);
021230d4
AV
2677 ixgbe_napi_enable_all(adapter);
2678
2679 /* clear any pending interrupts, may auto mask */
2680 IXGBE_READ_REG(hw, IXGBE_EICR);
2681
9a799d71
AK
2682 ixgbe_irq_enable(adapter);
2683
bf069c97
DS
2684 /*
2685 * If this adapter has a fan, check to see if we had a failure
2686 * before we enabled the interrupt.
2687 */
2688 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2689 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2690 if (esdp & IXGBE_ESDP_SDP1)
2691 DPRINTK(DRV, CRIT,
2692 "Fan has stopped, replace the adapter\n");
2693 }
2694
e8e26350
PW
2695 /*
2696 * For hot-pluggable SFP+ devices, a new SFP+ module may have
2697 * arrived before interrupts were enabled. We need to kick off
2698 * the SFP+ module setup first, then try to bring up link.
2699 * If we're not hot-pluggable SFP+, we just need to configure link
2700 * and bring it up.
2701 */
2702 err = hw->phy.ops.identify(hw);
2703 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 2704 DPRINTK(PROBE, ERR, "PHY not supported on this NIC, err %d\n", err);
2705 ixgbe_down(adapter);
2706 return err;
2707 }
2708
2709 if (ixgbe_is_sfp(hw)) {
2710 ixgbe_sfp_link_config(adapter);
2711 } else {
2712 err = ixgbe_non_sfp_link_config(hw);
2713 if (err)
2714 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2715 }
0ecc061d 2716
c4cf55e5
PWJ
2717 for (i = 0; i < adapter->num_tx_queues; i++)
2718 set_bit(__IXGBE_FDIR_INIT_DONE,
2719 &(adapter->tx_ring[i].reinit_state));
2720
1da100bb
PWJ
2721 /* enable transmits */
2722 netif_tx_start_all_queues(netdev);
2723
9a799d71
AK
 2724 /* bring the link up in the watchdog; this could race with our first
 2725 * link up interrupt but shouldn't be a problem */
cf8280ee
JB
2726 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2727 adapter->link_check_timeout = jiffies;
9a799d71
AK
2728 mod_timer(&adapter->watchdog_timer, jiffies);
2729 return 0;
2730}
2731
d4f80882
AV
2732void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2733{
2734 WARN_ON(in_interrupt());
2735 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2736 msleep(1);
2737 ixgbe_down(adapter);
2738 ixgbe_up(adapter);
2739 clear_bit(__IXGBE_RESETTING, &adapter->state);
2740}
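ixgbe_reinit_locked serializes resets with a bit lock: test_and_set_bit() atomically claims __IXGBE_RESETTING, and any concurrent caller spins in msleep(1) until the winner clears it. The same pattern in compressed userspace form using C11 atomics; the down/up/sleep helpers are hypothetical:

#include <stdatomic.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

extern void my_down(void);		/* hypothetical quiesce */
extern void my_up(void);		/* hypothetical bring-up */
extern void my_sleep_ms(int ms);	/* hypothetical sleep */

void reinit_locked(void)
{
	/* atomic_flag_test_and_set plays the role of test_and_set_bit:
	 * exactly one caller proceeds, the rest wait for the clear. */
	while (atomic_flag_test_and_set(&resetting))
		my_sleep_ms(1);
	my_down();
	my_up();
	atomic_flag_clear(&resetting);
}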
2741
9a799d71
AK
2742int ixgbe_up(struct ixgbe_adapter *adapter)
2743{
2744 /* hardware has been reset, we need to reload some things */
2745 ixgbe_configure(adapter);
2746
2747 return ixgbe_up_complete(adapter);
2748}
2749
2750void ixgbe_reset(struct ixgbe_adapter *adapter)
2751{
c44ade9e 2752 struct ixgbe_hw *hw = &adapter->hw;
8ca783ab
DS
2753 int err;
2754
2755 err = hw->mac.ops.init_hw(hw);
da4dd0f7
PWJ
2756 switch (err) {
2757 case 0:
2758 case IXGBE_ERR_SFP_NOT_PRESENT:
2759 break;
2760 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2761 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2762 break;
794caeb2
PWJ
2763 case IXGBE_ERR_EEPROM_VERSION:
2764 /* We are running on a pre-production device, log a warning */
2765 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2766 "adapter/LOM. Please be aware there may be issues "
2767 "associated with your hardware. If you are "
2768 "experiencing problems please contact your Intel or "
2769 "hardware representative who provided you with this "
2770 "hardware.\n");
2771 break;
da4dd0f7
PWJ
2772 default:
2773 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2774 }
9a799d71
AK
2775
2776 /* reprogram the RAR[0] in case user changed it. */
c44ade9e 2777 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
9a799d71
AK
2778}
2779
9a799d71
AK
2780/**
2781 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2782 * @adapter: board private structure
2783 * @rx_ring: ring to free buffers from
2784 **/
2785static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
b4617240 2786 struct ixgbe_ring *rx_ring)
9a799d71
AK
2787{
2788 struct pci_dev *pdev = adapter->pdev;
2789 unsigned long size;
2790 unsigned int i;
2791
2792 /* Free all the Rx ring sk_buffs */
2793
2794 for (i = 0; i < rx_ring->count; i++) {
2795 struct ixgbe_rx_buffer *rx_buffer_info;
2796
2797 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2798 if (rx_buffer_info->dma) {
2799 pci_unmap_single(pdev, rx_buffer_info->dma,
b4617240
PW
2800 rx_ring->rx_buf_len,
2801 PCI_DMA_FROMDEVICE);
9a799d71
AK
2802 rx_buffer_info->dma = 0;
2803 }
2804 if (rx_buffer_info->skb) {
f8212f97 2805 struct sk_buff *skb = rx_buffer_info->skb;
9a799d71 2806 rx_buffer_info->skb = NULL;
f8212f97
AD
2807 do {
2808 struct sk_buff *this = skb;
2809 skb = skb->prev;
2810 dev_kfree_skb(this);
2811 } while (skb);
9a799d71
AK
2812 }
2813 if (!rx_buffer_info->page)
2814 continue;
762f4c57
JB
2815 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
2816 PCI_DMA_FROMDEVICE);
9a799d71 2817 rx_buffer_info->page_dma = 0;
9a799d71
AK
2818 put_page(rx_buffer_info->page);
2819 rx_buffer_info->page = NULL;
762f4c57 2820 rx_buffer_info->page_offset = 0;
9a799d71
AK
2821 }
2822
2823 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2824 memset(rx_ring->rx_buffer_info, 0, size);
2825
2826 /* Zero out the descriptor ring */
2827 memset(rx_ring->desc, 0, rx_ring->size);
2828
2829 rx_ring->next_to_clean = 0;
2830 rx_ring->next_to_use = 0;
2831
9891ca7c
JB
2832 if (rx_ring->head)
2833 writel(0, adapter->hw.hw_addr + rx_ring->head);
2834 if (rx_ring->tail)
2835 writel(0, adapter->hw.hw_addr + rx_ring->tail);
9a799d71
AK
2836}
2837
2838/**
2839 * ixgbe_clean_tx_ring - Free Tx Buffers
2840 * @adapter: board private structure
2841 * @tx_ring: ring to be cleaned
2842 **/
2843static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
b4617240 2844 struct ixgbe_ring *tx_ring)
9a799d71
AK
2845{
2846 struct ixgbe_tx_buffer *tx_buffer_info;
2847 unsigned long size;
2848 unsigned int i;
2849
2850 /* Free all the Tx ring sk_buffs */
2851
2852 for (i = 0; i < tx_ring->count; i++) {
2853 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2854 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2855 }
2856
2857 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2858 memset(tx_ring->tx_buffer_info, 0, size);
2859
2860 /* Zero out the descriptor ring */
2861 memset(tx_ring->desc, 0, tx_ring->size);
2862
2863 tx_ring->next_to_use = 0;
2864 tx_ring->next_to_clean = 0;
2865
9891ca7c
JB
2866 if (tx_ring->head)
2867 writel(0, adapter->hw.hw_addr + tx_ring->head);
2868 if (tx_ring->tail)
2869 writel(0, adapter->hw.hw_addr + tx_ring->tail);
9a799d71
AK
2870}
2871
2872/**
021230d4 2873 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
9a799d71
AK
2874 * @adapter: board private structure
2875 **/
021230d4 2876static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
2877{
2878 int i;
2879
021230d4
AV
2880 for (i = 0; i < adapter->num_rx_queues; i++)
2881 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
9a799d71
AK
2882}
2883
2884/**
021230d4 2885 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
9a799d71
AK
2886 * @adapter: board private structure
2887 **/
021230d4 2888static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
2889{
2890 int i;
2891
021230d4
AV
2892 for (i = 0; i < adapter->num_tx_queues; i++)
2893 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
9a799d71
AK
2894}
2895
2896void ixgbe_down(struct ixgbe_adapter *adapter)
2897{
2898 struct net_device *netdev = adapter->netdev;
7f821875 2899 struct ixgbe_hw *hw = &adapter->hw;
9a799d71 2900 u32 rxctrl;
7f821875
JB
2901 u32 txdctl;
2902 int i, j;
9a799d71
AK
2903
2904 /* signal that we are down to the interrupt handler */
2905 set_bit(__IXGBE_DOWN, &adapter->state);
2906
2907 /* disable receives */
7f821875
JB
2908 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2909 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
9a799d71
AK
2910
2911 netif_tx_disable(netdev);
2912
7f821875 2913 IXGBE_WRITE_FLUSH(hw);
9a799d71
AK
2914 msleep(10);
2915
7f821875
JB
2916 netif_tx_stop_all_queues(netdev);
2917
9a799d71
AK
2918 ixgbe_irq_disable(adapter);
2919
021230d4 2920 ixgbe_napi_disable_all(adapter);
7f821875 2921
9a799d71 2922 del_timer_sync(&adapter->watchdog_timer);
cf8280ee 2923 cancel_work_sync(&adapter->watchdog_task);
9a799d71 2924
c4cf55e5
PWJ
2925 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2926 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2927 cancel_work_sync(&adapter->fdir_reinit_task);
2928
7f821875
JB
2929 /* disable transmits in the hardware now that interrupts are off */
2930 for (i = 0; i < adapter->num_tx_queues; i++) {
2931 j = adapter->tx_ring[i].reg_idx;
2932 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2933 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2934 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2935 }
88512539
PW
2936 /* Disable the Tx DMA engine on 82599 */
2937 if (hw->mac.type == ixgbe_mac_82599EB)
2938 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
2939 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
2940 ~IXGBE_DMATXCTL_TE));
7f821875 2941
9a799d71 2942 netif_carrier_off(netdev);
9a799d71 2943
6f4a0e45
PL
2944 if (!pci_channel_offline(adapter->pdev))
2945 ixgbe_reset(adapter);
9a799d71
AK
2946 ixgbe_clean_all_tx_rings(adapter);
2947 ixgbe_clean_all_rx_rings(adapter);
2948
5dd2d332 2949#ifdef CONFIG_IXGBE_DCA
96b0e0f6 2950 /* since we reset the hardware DCA settings were cleared */
e35ec126 2951 ixgbe_setup_dca(adapter);
96b0e0f6 2952#endif
9a799d71
AK
2953}
2954
9a799d71 2955/**
021230d4
AV
2956 * ixgbe_poll - NAPI Rx polling callback
2957 * @napi: structure for representing this polling device
2958 * @budget: how many packets driver is allowed to clean
2959 *
2960 * This function is used for legacy and MSI, NAPI mode
9a799d71 2961 **/
021230d4 2962static int ixgbe_poll(struct napi_struct *napi, int budget)
9a799d71 2963{
9a1a69ad
JB
2964 struct ixgbe_q_vector *q_vector =
2965 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 2966 struct ixgbe_adapter *adapter = q_vector->adapter;
9a1a69ad 2967 int tx_clean_complete, work_done = 0;
9a799d71 2968
5dd2d332 2969#ifdef CONFIG_IXGBE_DCA
bd0362dd
JC
2970 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2971 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2972 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2973 }
2974#endif
2975
fe49f04a 2976 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
78b6f4ce 2977 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
9a799d71 2978
9a1a69ad 2979 if (!tx_clean_complete)
d2c7ddd6
DM
2980 work_done = budget;
2981
53e52c72
DM
2982 /* If budget not fully consumed, exit the polling mode */
2983 if (work_done < budget) {
288379f0 2984 napi_complete(napi);
509ee935 2985 if (adapter->itr_setting & 1)
f494e8fa 2986 ixgbe_set_itr(adapter);
d4f80882 2987 if (!test_bit(__IXGBE_DOWN, &adapter->state))
835462fc 2988 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
9a799d71 2989 }
9a799d71
AK
2990 return work_done;
2991}
2992
2993/**
2994 * ixgbe_tx_timeout - Respond to a Tx Hang
2995 * @netdev: network interface device structure
2996 **/
2997static void ixgbe_tx_timeout(struct net_device *netdev)
2998{
2999 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3000
3001 /* Do the reset outside of interrupt context */
3002 schedule_work(&adapter->reset_task);
3003}
3004
3005static void ixgbe_reset_task(struct work_struct *work)
3006{
3007 struct ixgbe_adapter *adapter;
3008 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3009
2f90b865
AD
3010 /* If we're already down or resetting, just bail */
3011 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3012 test_bit(__IXGBE_RESETTING, &adapter->state))
3013 return;
3014
9a799d71
AK
3015 adapter->tx_timeout_count++;
3016
d4f80882 3017 ixgbe_reinit_locked(adapter);
9a799d71
AK
3018}
3019
bc97114d
PWJ
3020#ifdef CONFIG_IXGBE_DCB
3021static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
b9804972 3022{
bc97114d 3023 bool ret = false;
0cefafad 3024 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
b9804972 3025
0cefafad
JB
3026 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3027 return ret;
3028
3029 f->mask = 0x7 << 3;
3030 adapter->num_rx_queues = f->indices;
3031 adapter->num_tx_queues = f->indices;
3032 ret = true;
2f90b865 3033
bc97114d
PWJ
3034 return ret;
3035}
3036#endif
3037
4df10466
JB
3038/**
3039 * ixgbe_set_rss_queues: Allocate queues for RSS
3040 * @adapter: board private structure to initialize
3041 *
3042 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3043 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3044 *
3045 **/
bc97114d
PWJ
3046static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3047{
3048 bool ret = false;
0cefafad 3049 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
bc97114d
PWJ
3050
3051 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
0cefafad
JB
3052 f->mask = 0xF;
3053 adapter->num_rx_queues = f->indices;
3054 adapter->num_tx_queues = f->indices;
bc97114d
PWJ
3055 ret = true;
3056 } else {
bc97114d 3057 ret = false;
b9804972
JB
3058 }
3059
bc97114d
PWJ
3060 return ret;
3061}
3062
c4cf55e5
PWJ
3063/**
3064 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3065 * @adapter: board private structure to initialize
3066 *
3067 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3068 * to the original CPU that initiated the Tx session. This runs in addition
3069 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3070 * Rx load across CPUs using RSS.
3071 *
3072 **/
 3073 static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3074{
3075 bool ret = false;
3076 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3077
3078 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3079 f_fdir->mask = 0;
3080
3081 /* Flow Director must have RSS enabled */
3082 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3083 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3084 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
3085 adapter->num_tx_queues = f_fdir->indices;
3086 adapter->num_rx_queues = f_fdir->indices;
3087 ret = true;
3088 } else {
3089 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3090 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3091 }
3092 return ret;
3093}
3094
0331a832
YZ
3095#ifdef IXGBE_FCOE
3096/**
3097 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
3098 * @adapter: board private structure to initialize
3099 *
3100 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 3101 * The ring feature mask is not used as a mask for FCoE, since FCoE can
 3102 * take any 8 rx queues out of the max number of rx queues; instead, it is
 3103 * used as the index of the first rx queue used by FCoE.
3104 *
3105 **/
3106static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3107{
3108 bool ret = false;
3109 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3110
3111 f->indices = min((int)num_online_cpus(), f->indices);
3112 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3113#ifdef CONFIG_IXGBE_DCB
3114 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 3115 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3116 ixgbe_set_dcb_queues(adapter);
3117 }
3118#endif
3119 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 3120 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3121 ixgbe_set_rss_queues(adapter);
3122 }
3123 /* adding FCoE rx rings to the end */
3124 f->mask = adapter->num_rx_queues;
3125 adapter->num_rx_queues += f->indices;
3126 if (adapter->num_tx_queues == 0)
3127 adapter->num_tx_queues = f->indices;
3128
3129 ret = true;
3130 }
3131
3132 return ret;
3133}
3134
3135#endif /* IXGBE_FCOE */
4df10466
JB
3136/*
 3137 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3138 * @adapter: board private structure to initialize
3139 *
3140 * This is the top level queue allocation routine. The order here is very
 3141 * important, starting with the largest combination of features turned on at once,
3142 * and ending with the smallest set of features. This way large combinations
3143 * can be allocated if they're turned on, and smaller combinations are the
3144 * fallthrough conditions.
3145 *
3146 **/
bc97114d
PWJ
3147static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3148{
0331a832
YZ
3149#ifdef IXGBE_FCOE
3150 if (ixgbe_set_fcoe_queues(adapter))
3151 goto done;
3152
3153#endif /* IXGBE_FCOE */
bc97114d
PWJ
3154#ifdef CONFIG_IXGBE_DCB
3155 if (ixgbe_set_dcb_queues(adapter))
af22ab1b 3156 goto done;
bc97114d
PWJ
3157
3158#endif
c4cf55e5
PWJ
3159 if (ixgbe_set_fdir_queues(adapter))
3160 goto done;
3161
bc97114d 3162 if (ixgbe_set_rss_queues(adapter))
af22ab1b
WF
3163 goto done;
3164
3165 /* fallback to base case */
3166 adapter->num_rx_queues = 1;
3167 adapter->num_tx_queues = 1;
3168
3169done:
3170 /* Notify the stack of the (possibly) reduced Tx Queue count. */
3171 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
b9804972
JB
3172}
3173
021230d4 3174static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
b4617240 3175 int vectors)
021230d4
AV
3176{
3177 int err, vector_threshold;
3178
3179 /* We'll want at least 3 (vector_threshold):
3180 * 1) TxQ[0] Cleanup
3181 * 2) RxQ[0] Cleanup
3182 * 3) Other (Link Status Change, etc.)
3183 * 4) TCP Timer (optional)
3184 */
3185 vector_threshold = MIN_MSIX_COUNT;
3186
3187 /* The more we get, the more we will assign to Tx/Rx Cleanup
3188 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3189 * Right now, we simply care about how many we'll get; we'll
3190 * set them up later while requesting irq's.
3191 */
3192 while (vectors >= vector_threshold) {
3193 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
b4617240 3194 vectors);
021230d4
AV
3195 if (!err) /* Success in acquiring all requested vectors. */
3196 break;
3197 else if (err < 0)
3198 vectors = 0; /* Nasty failure, quit now */
3199 else /* err == number of vectors we should try again with */
3200 vectors = err;
3201 }
3202
3203 if (vectors < vector_threshold) {
3204 /* Can't allocate enough MSI-X interrupts? Oh well.
3205 * This just means we'll go with either a single MSI
3206 * vector or fall back to legacy interrupts.
3207 */
3208 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3209 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3210 kfree(adapter->msix_entries);
3211 adapter->msix_entries = NULL;
021230d4
AV
3212 } else {
3213 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
eb7f139c
PWJ
3214 /*
3215 * Adjust for only the vectors we'll use, which is minimum
3216 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
3217 * vectors we were allocated.
3218 */
3219 adapter->num_msix_vectors = min(vectors,
3220 adapter->max_msix_q_vectors + NON_Q_VECTORS);
021230d4
AV
3221 }
3222}
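The loop above leans on the old pci_enable_msix() contract: 0 means every requested vector was granted, a negative value is a hard failure, and a positive value is the count the system could grant instead, so the request is retried with that count until it falls below the minimum. The negotiation, condensed around a hypothetical allocator with the same return convention:

/* Hypothetical allocator mirroring the old pci_enable_msix() returns:
 * 0 = success, <0 = hard failure, >0 = retry with this many vectors. */
extern int my_enable_msix(int nvec);

#define MIN_VECTORS	3	/* Tx clean + Rx clean + other */

static int acquire_vectors(int wanted)
{
	while (wanted >= MIN_VECTORS) {
		int err = my_enable_msix(wanted);

		if (!err)
			return wanted;	/* got all we asked for */
		if (err < 0)
			return 0;	/* nasty failure, give up */
		wanted = err;		/* retry with what's on offer */
	}
	return 0;			/* below threshold: MSI/legacy */
}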
3223
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	} else {
		ret = false;
	}

	return ret;
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;
	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* the number of queues is assumed to be symmetric */
			for (i = 0; i < dcb_i; i++) {
				adapter->rx_ring[i].reg_idx = i << 3;
				adapter->tx_ring[i].reg_idx = i << 2;
			}
			ret = true;
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			if (dcb_i == 8) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 32
				 * Tx TC2 starts at: descriptor queue 64
				 * Tx TC3 starts at: descriptor queue 80
				 * Tx TC4 starts at: descriptor queue 96
				 * Tx TC5 starts at: descriptor queue 104
				 * Tx TC6 starts at: descriptor queue 112
				 * Tx TC7 starts at: descriptor queue 120
				 *
				 * Rx TC0-TC7 are offset by 16 queues each
				 */
				for (i = 0; i < 3; i++) {
					adapter->tx_ring[i].reg_idx = i << 5;
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < 5; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 2) << 4);
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < dcb_i; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 8) << 3);
					adapter->rx_ring[i].reg_idx = i << 4;
				}

				ret = true;
			} else if (dcb_i == 4) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 64
				 * Tx TC2 starts at: descriptor queue 96
				 * Tx TC3 starts at: descriptor queue 112
				 *
				 * Rx TC0-TC3 are offset by 32 queues each
				 */
				adapter->tx_ring[0].reg_idx = 0;
				adapter->tx_ring[1].reg_idx = 64;
				adapter->tx_ring[2].reg_idx = 96;
				adapter->tx_ring[3].reg_idx = 112;
				for (i = 0 ; i < dcb_i; i++)
					adapter->rx_ring[i].reg_idx = i << 5;

				ret = true;
			} else {
				ret = false;
			}
		} else {
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}
#endif
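
/*
 * Worked example (illustration only): for dcb_i == 8 on 82599, the three
 * loops in ixgbe_cache_ring_dcb() reproduce the Tx start addresses from
 * the comment above:
 *
 *	i = 0..2:  i << 5       -> 0, 32, 64
 *	i = 3..4:  (i + 2) << 4 -> 80, 96
 *	i = 5..7:  (i + 8) << 3 -> 104, 112, 120
 *
 * while every Rx TC is simply i << 4 -> 0, 16, 32, ..., 112.
 */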

/**
 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	}

	return ret;
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 */
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_i = 0;
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			ixgbe_cache_ring_dcb(adapter);
			fcoe_i = adapter->rx_ring[0].reg_idx + 1;
		}
#endif /* CONFIG_IXGBE_DCB */
		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
			ixgbe_cache_ring_rss(adapter);
			fcoe_i = f->mask;
		}
		for (i = 0; i < f->indices; i++, fcoe_i++)
			adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
		ret = true;
	}
	return ret;
}

#endif /* IXGBE_FCOE */
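/*
 * Illustration only: with RSS enabled and, say, f->mask == 16 and
 * f->indices == 8, the loop in ixgbe_cache_ring_fcoe() maps
 * rx_ring[16..23] onto register indices 16..23, stacking the FCoE rings
 * directly behind the RSS rings; under DCB the stacking instead begins
 * one register past rx_ring[0].reg_idx.
 */
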
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important. It must
 * start with the most features enabled at the same time, then trickle
 * down to the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0].reg_idx = 0;
	adapter->tx_ring[0].reg_idx = 0;

#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_fdir(adapter))
		return;

	if (ixgbe_cache_ring_rss(adapter))
		return;
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors. With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device. Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
	                                sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	adapter->atr_sample_rate = 0;
	ixgbe_set_num_queues(adapter);

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
		        "falling back to legacy. Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
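
/*
 * Illustration only: on an 8-CPU machine with 8 Rx and 8 Tx queues and
 * NON_Q_VECTORS == 1, the budget works out to
 *
 *	v_budget = min(8 + 8, 8 * 2) + 1 = 17
 *
 * before being clamped to hw->mac.max_msix_vectors.
 */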

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbe_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		napi_vectors = adapter->num_rx_queues;
		poll = &ixgbe_clean_rxtx_many;
	} else {
		num_q_vectors = 1;
		napi_vectors = 1;
		poll = &ixgbe_poll;
	}

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->eitr = adapter->eitr_param;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
		        "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" :
	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
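
/*
 * Reading aid (no new logic): the err_* labels above unwind in exact
 * reverse order of setup; a failed queue allocation frees the q_vectors
 * and then drops MSI-X/MSI, while a failure in the very first step has
 * nothing to undo.
 */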

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

/**
 * ixgbe_sfp_timer - worker thread to find a missing module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			DPRINTK(PROBE, ERR, "failed to initialize because an "
			        "unsupported SFP+ module type was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->ring_feature[RING_F_FDIR].indices =
		                                         IXGBE_MAX_FDIR_INDICES;
		adapter->atr_sample_rate = 20;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->itr_setting = 1;
	adapter->eitr_param = 20000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
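
/*
 * Illustration only: with the usual MAX_TRAFFIC_CLASS of 8, the
 * alternating bandwidth-group percentages assigned in ixgbe_sw_init()
 * are 12, 13, 12, 13, ... which sum to 4 * (12 + 13) = 100%, so the
 * eight traffic classes split the bandwidth group with no rounding loss.
 */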

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}
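
/*
 * Illustration only: with, say, 512 descriptors and a 16-byte
 * union ixgbe_adv_tx_desc, the ring occupies 512 * 16 = 8192 bytes and
 * ALIGN(8192, 4096) leaves it untouched; an odd count like 100 would be
 * padded from 1600 up to 4096 so the DMA region always ends on a 4K
 * boundary.
 */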

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
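
/*
 * Illustration only: a standard 1500-byte MTU gives max_frame =
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes on the wire, and a
 * 9000-byte jumbo MTU gives 9018, still under the
 * IXGBE_MAX_JUMBO_FRAME_SIZE ceiling checked above.
 */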

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
		pci_wake_from_d3(pdev, true);
	else
		pci_wake_from_d3(pdev, false);

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
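
/*
 * Reading aid (no new logic): *enable_wake ends up true whenever any
 * wake filter bit is set in adapter->wol, e.g. IXGBE_WUFC_MAG for
 * magic-packet wake, and the suspend/shutdown callers below use it to
 * decide whether to arm D3 wake.
 */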

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 rsc_count = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
			                     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++)
			rsc_count += adapter->rx_ring[i].rsc_count;
		adapter->rsc_count = rsc_count;
	}

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                    IXGBE_PXONRXCNT(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                   IXGBE_PXOFFRXCNT(i));
			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                      IXGBE_PXONRXC(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                     IXGBE_PXOFFRXC(i));
		}
		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONTXC(i));
		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
		                                             IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
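
/*
 * Illustration only: the 82598 errata adjustment above treats each
 * transmitted XON/XOFF pause frame as a minimum-size frame, so for
 * lxon + lxoff == 10 the good-octets counter is reduced by
 * 10 * (ETH_ZLEN (60) + ETH_FCS_LEN (4)) = 640 bytes, alongside the
 * matching packet counters.
 */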

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
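
/*
 * Illustration only: with five MSI-X queue vectors of which vectors 0-3
 * actually serve Tx/Rx rings, the loop above builds eics = 0xf, and
 * ixgbe_irq_rearm_queues() uses that mask to raise a software interrupt
 * on exactly those vectors so their rings are cleaned even when no new
 * packet fires a hardware interrupt.
 */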

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	if (hw->mac.ops.get_link_capabilities)
		hw->mac.ops.get_link_capabilities(hw, &autoneg,
		                                  &hw->mac.autoneg);
	if (hw->mac.ops.setup_link_speed)
		hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
		ixgbe_down(adapter);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
			        &(adapter->tx_ring[i].reinit_state));
	} else {
		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
		        "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = (frctl & IXGBE_FCTRL_RFCE);
				flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			        (flow_rx ? "RX" :
			         (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = &adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

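/*
 * Illustration only: for a TCP/IPv4 TSO frame with a 14-byte Ethernet
 * header, 20-byte IP header, 20-byte TCP header and a 1460-byte MSS,
 * ixgbe_tso() programs the context descriptor with MACLEN 14, IPLEN 20,
 * L4LEN 20 and MSS 1460, and returns *hdr_len = 54, which the hardware
 * replicates onto every segment it carves from the payload.
 */
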
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case cpu_to_be16(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
                        unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = map[f] + offset;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
4922
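/*
 * ixgbe_tx_queue - write the advanced data descriptors to the hardware
 *
 * Builds cmd_type_len/olinfo_status from the tx_flags (VLAN, TSO,
 * checksum, FCoE), writes one descriptor per mapped buffer, marks the
 * last descriptor EOP/RS, and then bumps the tail register so the
 * hardware starts fetching.  The wmb() orders the descriptor writes
 * before the tail write on weakly ordered architectures.
 */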
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

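/*
 * ixgbe_atr - sample a transmitted frame into a flow director filter
 *
 * Extracts the IPv4 4-tuple (plus VLAN id and EtherType as flex bytes)
 * from an outgoing TCP or UDP frame and programs an 82599 signature
 * filter so that the *return* traffic of this flow is steered to the
 * given queue; that is why source and destination are swapped below.
 */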
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
		      int queue, u32 tx_flags)
{
	/* Right now, we support IPv4 only */
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct udphdr *uh;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* check if we're UDP or TCP */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else if (iph->protocol == IPPROTO_UDP) {
		uh = udp_hdr(skb);
		src_port = uh->source;
		dst_port = uh->dest;
		l4type |= IXGBE_ATR_L4TYPE_UDP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
		  IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

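/*
 * Stop-and-recheck helper for the transmit path.  The subqueue is
 * stopped first, then a full barrier is issued before the free
 * descriptor count is re-read: if the Tx completion cleanup path freed
 * enough entries in the meantime, the queue is restarted immediately
 * instead of waiting for the next Tx interrupt.
 */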
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

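/*
 * Queue selection for transmit.  With flow director (ATR) hashing
 * enabled the sending CPU's id is used, which matches ixgbe_atr()'s
 * assumption that Tx and Rx queues are bound to the same CPU; under
 * DCB all traffic defaults to class 0, and otherwise the generic
 * skb_tx_hash() spreads flows across the queues.
 */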
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		return smp_processor_id();

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return 0;  /* All traffic should default to class 0 */

	return skb_tx_hash(dev, skb);
}

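/*
 * Main transmit entry point (.ndo_start_xmit).  In order: pick the ring
 * from skb->queue_mapping, fold the VLAN/DCB priority into tx_flags,
 * verify enough descriptors are free for the worst case (or return
 * NETDEV_TX_BUSY), set up the TSO/checksum/FCoE context descriptor,
 * DMA-map the buffers, optionally sample the flow for ATR, and finally
 * post the data descriptors to the hardware.
 */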
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	r_idx = skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		tx_flags |= (skb->queue_mapping << 13);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (skb->protocol == htons(ETH_P_FCOE)))
		tx_flags |= IXGBE_TX_FLAGS_FCOE;

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (skb->protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
				     &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
					  tx_flags);
				tx_ring->atr_count = 0;
			}
		}
		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
			       hdr_len);
		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

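/*
 * Clause 45 MDIO accessors, plugged into hw->phy.mdio so the generic
 * MDIO helpers (see mdio_mii_ioctl() in ixgbe_ioctl() below) can reach
 * the PHY; both bail out unless the request targets this device's PHY
 * address (prtad).
 */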
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addr_list
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the
 * corresponding netdev->dev_addr_list
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif

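/*
 * net_device_ops table wiring the entry points above (and elsewhere in
 * this file) into the network stack; assigned in ixgbe_probe().
 */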
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
#endif /* IXGBE_FCOE */
};

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

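	/*
	 * Prefer a 64-bit DMA mask (and matching coherent mask); fall
	 * back to 32-bit if the platform can't do DAC.  The outcome is
	 * remembered in pci_using_dac so NETIF_F_HIGHDMA is only
	 * advertised when 64-bit addressing is actually usable.
	 */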
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
			"0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
		  ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
				"Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to load because an "
			"unsupported SFP+ module type was detected.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
				netdev->features |= NETIF_F_FCOE_CRC;
				netdev->features |= NETIF_F_FSO;
				netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
				DPRINTK(DRV, INFO, "FCoE enabled, "
					"disabling Flow Director\n");
				adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
				adapter->flags &=
					~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
				adapter->atr_sample_rate = 0;
			} else {
				adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
			}
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
			(IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_init_wakeup(&adapter->pdev->dev, true);
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
		 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
		  (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" :
		  "Unknown"),
		 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, "
			 "PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			 (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type,
			 (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
			 "adapter/LOM. Please be aware there may be issues "
			 "associated with your hardware. If you are "
			 "experiencing problems please contact your Intel or "
			 "hardware representative who provided you with this "
			 "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
			"pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}

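/*
 * PCIe AER error recovery: the core calls .error_detected first, then
 * (when a reset is requested) .slot_reset after the link has been
 * reset, and finally .resume once traffic may flow again.  The three
 * handlers below are registered through ixgbe_err_handler.
 */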
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */