Commit | Line | Data |
---|---|---|
ae06c70b | 1 | // SPDX-License-Identifier: GPL-2.0 |
51dce24b | 2 | /* Copyright(c) 2007 - 2018 Intel Corporation. */ |
9d5c8243 | 3 | |
876d2d6f JK |
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
5 | ||
9d5c8243 AK |
6 | #include <linux/module.h> |
7 | #include <linux/types.h> | |
8 | #include <linux/init.h> | |
b2cb09b1 | 9 | #include <linux/bitops.h> |
9d5c8243 AK |
10 | #include <linux/vmalloc.h> |
11 | #include <linux/pagemap.h> | |
12 | #include <linux/netdevice.h> | |
9d5c8243 | 13 | #include <linux/ipv6.h> |
5a0e3ad6 | 14 | #include <linux/slab.h> |
9d5c8243 AK |
15 | #include <net/checksum.h> |
16 | #include <net/ip6_checksum.h> | |
05f9d3e1 | 17 | #include <net/pkt_sched.h> |
f8f3d34e | 18 | #include <net/pkt_cls.h> |
c6cb090b | 19 | #include <linux/net_tstamp.h> |
9d5c8243 AK |
20 | #include <linux/mii.h> |
21 | #include <linux/ethtool.h> | |
01789349 | 22 | #include <linux/if.h> |
9d5c8243 AK |
23 | #include <linux/if_vlan.h> |
24 | #include <linux/pci.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/interrupt.h> | |
7d13a7d0 AD |
27 | #include <linux/ip.h> |
28 | #include <linux/tcp.h> | |
29 | #include <linux/sctp.h> | |
9d5c8243 | 30 | #include <linux/if_ether.h> |
40a914fa | 31 | #include <linux/aer.h> |
70c71606 | 32 | #include <linux/prefetch.h> |
749ab2cd | 33 | #include <linux/pm_runtime.h> |
806ffb1d | 34 | #include <linux/etherdevice.h> |
421e02f0 | 35 | #ifdef CONFIG_IGB_DCA |
fe4506b6 JC |
36 | #include <linux/dca.h> |
37 | #endif | |
441fc6fd | 38 | #include <linux/i2c.h> |
9d5c8243 AK |
39 | #include "igb.h" |
40 | ||
67b1b903 | 41 | #define MAJ 5 |
a865d22d | 42 | #define MIN 6 |
6fb46902 | 43 | #define BUILD 0 |
0d1fe82d | 44 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ |
929dd047 | 45 | __stringify(BUILD) "-k" |
05f9d3e1 AG |
46 | |
47 | enum queue_mode { | |
48 | QUEUE_MODE_STRICT_PRIORITY, | |
49 | QUEUE_MODE_STREAM_RESERVATION, | |
50 | }; | |
51 | ||
52 | enum tx_queue_prio { | |
53 | TX_QUEUE_PRIO_HIGH, | |
54 | TX_QUEUE_PRIO_LOW, | |
55 | }; | |
56 | ||
9d5c8243 AK |
57 | char igb_driver_name[] = "igb"; |
58 | char igb_driver_version[] = DRV_VERSION; | |
59 | static const char igb_driver_string[] = | |
60 | "Intel(R) Gigabit Ethernet Network Driver"; | |
4b9ea462 | 61 | static const char igb_copyright[] = |
74cfb2e1 | 62 | "Copyright (c) 2007-2014 Intel Corporation."; |
9d5c8243 | 63 | |
9d5c8243 AK |
64 | static const struct e1000_info *igb_info_tbl[] = { |
65 | [board_82575] = &e1000_82575_info, | |
66 | }; | |
67 | ||
cd1631ce | 68 | static const struct pci_device_id igb_pci_tbl[] = { |
ceb5f13b CW |
69 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, |
70 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, | |
71 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, | |
f96a8a0b CW |
72 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, |
73 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, | |
74 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, | |
75 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, | |
76 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, | |
53b87ce3 CW |
77 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, |
78 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, | |
d2ba2ed8 AD |
79 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, |
80 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, | |
81 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, | |
82 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, | |
55cac248 AD |
83 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, |
84 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, | |
6493d24f | 85 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, |
55cac248 AD |
86 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, |
87 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, | |
88 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, | |
308fb39a JG |
89 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, |
90 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, | |
1b5dda33 GJ |
91 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, |
92 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, | |
2d064c06 | 93 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, |
9eb2341d | 94 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, |
747d49ba | 95 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, |
2d064c06 AD |
96 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, |
97 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, | |
4703bf73 | 98 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, |
b894fa26 | 99 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, |
c8ea5ea9 | 100 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, |
9d5c8243 AK |
101 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, |
102 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, | |
103 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, | |
104 | /* required last entry */ | |
105 | {0, } | |
106 | }; | |
107 | ||
108 | MODULE_DEVICE_TABLE(pci, igb_pci_tbl); | |
109 | ||
9d5c8243 AK |
110 | static int igb_setup_all_tx_resources(struct igb_adapter *); |
111 | static int igb_setup_all_rx_resources(struct igb_adapter *); | |
112 | static void igb_free_all_tx_resources(struct igb_adapter *); | |
113 | static void igb_free_all_rx_resources(struct igb_adapter *); | |
06cf2666 | 114 | static void igb_setup_mrqc(struct igb_adapter *); |
9d5c8243 | 115 | static int igb_probe(struct pci_dev *, const struct pci_device_id *); |
9f9a12f8 | 116 | static void igb_remove(struct pci_dev *pdev); |
9d5c8243 | 117 | static int igb_sw_init(struct igb_adapter *); |
46eafa59 SA |
118 | int igb_open(struct net_device *); |
119 | int igb_close(struct net_device *); | |
53c7d064 | 120 | static void igb_configure(struct igb_adapter *); |
9d5c8243 AK |
121 | static void igb_configure_tx(struct igb_adapter *); |
122 | static void igb_configure_rx(struct igb_adapter *); | |
9d5c8243 AK |
123 | static void igb_clean_all_tx_rings(struct igb_adapter *); |
124 | static void igb_clean_all_rx_rings(struct igb_adapter *); | |
3b644cf6 MW |
125 | static void igb_clean_tx_ring(struct igb_ring *); |
126 | static void igb_clean_rx_ring(struct igb_ring *); | |
ff41f8dc | 127 | static void igb_set_rx_mode(struct net_device *); |
26566eae KC |
128 | static void igb_update_phy_info(struct timer_list *); |
129 | static void igb_watchdog(struct timer_list *); | |
9d5c8243 | 130 | static void igb_watchdog_task(struct work_struct *); |
cd392f5c | 131 | static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); |
bc1f4470 | 132 | static void igb_get_stats64(struct net_device *dev, |
133 | struct rtnl_link_stats64 *stats); | |
9d5c8243 AK |
134 | static int igb_change_mtu(struct net_device *, int); |
135 | static int igb_set_mac(struct net_device *, void *); | |
bf456abb | 136 | static void igb_set_uta(struct igb_adapter *adapter, bool set); |
9d5c8243 AK |
137 | static irqreturn_t igb_intr(int irq, void *); |
138 | static irqreturn_t igb_intr_msi(int irq, void *); | |
139 | static irqreturn_t igb_msix_other(int irq, void *); | |
047e0030 | 140 | static irqreturn_t igb_msix_ring(int irq, void *); |
421e02f0 | 141 | #ifdef CONFIG_IGB_DCA |
047e0030 | 142 | static void igb_update_dca(struct igb_q_vector *); |
fe4506b6 | 143 | static void igb_setup_dca(struct igb_adapter *); |
421e02f0 | 144 | #endif /* CONFIG_IGB_DCA */ |
661086df | 145 | static int igb_poll(struct napi_struct *, int); |
7f0ba845 | 146 | static bool igb_clean_tx_irq(struct igb_q_vector *, int); |
32b3e08f | 147 | static int igb_clean_rx_irq(struct igb_q_vector *, int); |
9d5c8243 AK |
148 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); |
149 | static void igb_tx_timeout(struct net_device *); | |
150 | static void igb_reset_task(struct work_struct *); | |
c502ea2e CW |
151 | static void igb_vlan_mode(struct net_device *netdev, |
152 | netdev_features_t features); | |
80d5c368 PM |
153 | static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); |
154 | static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); | |
9d5c8243 | 155 | static void igb_restore_vlan(struct igb_adapter *); |
83c21335 | 156 | static void igb_rar_set_index(struct igb_adapter *, u32); |
4ae196df AD |
157 | static void igb_ping_all_vfs(struct igb_adapter *); |
158 | static void igb_msg_task(struct igb_adapter *); | |
4ae196df | 159 | static void igb_vmm_control(struct igb_adapter *); |
f2ca0dbe | 160 | static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); |
83c21335 YK |
161 | static void igb_flush_mac_table(struct igb_adapter *); |
162 | static int igb_available_rars(struct igb_adapter *, u8); | |
163 | static void igb_set_default_mac_filter(struct igb_adapter *); | |
164 | static int igb_uc_sync(struct net_device *, const unsigned char *); | |
165 | static int igb_uc_unsync(struct net_device *, const unsigned char *); | |
4ae196df | 166 | static void igb_restore_vf_multicasts(struct igb_adapter *adapter); |
8151d294 WM |
167 | static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); |
168 | static int igb_ndo_set_vf_vlan(struct net_device *netdev, | |
79aab093 | 169 | int vf, u16 vlan, u8 qos, __be16 vlan_proto); |
ed616689 | 170 | static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); |
70ea4783 LL |
171 | static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, |
172 | bool setting); | |
1b8b062a CV |
173 | static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, |
174 | bool setting); | |
8151d294 WM |
175 | static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, |
176 | struct ifla_vf_info *ivi); | |
17dc566c | 177 | static void igb_check_vf_rate_limit(struct igb_adapter *); |
0e71def2 GH |
178 | static void igb_nfc_filter_exit(struct igb_adapter *adapter); |
179 | static void igb_nfc_filter_restore(struct igb_adapter *adapter); | |
46a01698 RL |
180 | |
181 | #ifdef CONFIG_PCI_IOV | |
0224d663 | 182 | static int igb_vf_configure(struct igb_adapter *adapter, int vf); |
781798a1 | 183 | static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); |
ceee3450 TF |
184 | static int igb_disable_sriov(struct pci_dev *dev); |
185 | static int igb_pci_disable_sriov(struct pci_dev *dev); | |
46a01698 | 186 | #endif |
9d5c8243 | 187 | |
749ab2cd YZ |
188 | static int igb_suspend(struct device *); |
189 | static int igb_resume(struct device *); | |
749ab2cd YZ |
190 | static int igb_runtime_suspend(struct device *dev); |
191 | static int igb_runtime_resume(struct device *dev); | |
192 | static int igb_runtime_idle(struct device *dev); | |
749ab2cd YZ |
193 | static const struct dev_pm_ops igb_pm_ops = { |
194 | SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) | |
195 | SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, | |
196 | igb_runtime_idle) | |
197 | }; | |
9d5c8243 | 198 | static void igb_shutdown(struct pci_dev *); |
fa44f2f1 | 199 | static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); |
421e02f0 | 200 | #ifdef CONFIG_IGB_DCA |
fe4506b6 JC |
201 | static int igb_notify_dca(struct notifier_block *, unsigned long, void *); |
202 | static struct notifier_block dca_notifier = { | |
203 | .notifier_call = igb_notify_dca, | |
204 | .next = NULL, | |
205 | .priority = 0 | |
206 | }; | |
207 | #endif | |
37680117 | 208 | #ifdef CONFIG_PCI_IOV |
6dd6d2b7 | 209 | static unsigned int max_vfs; |
2a3abf6d | 210 | module_param(max_vfs, uint, 0); |
c75c4edf | 211 | MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); |
2a3abf6d AD |
212 | #endif /* CONFIG_PCI_IOV */ |
213 | ||
9d5c8243 AK |
214 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *, |
215 | pci_channel_state_t); | |
216 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); | |
217 | static void igb_io_resume(struct pci_dev *); | |
218 | ||
3646f0e5 | 219 | static const struct pci_error_handlers igb_err_handler = { |
9d5c8243 AK |
220 | .error_detected = igb_io_error_detected, |
221 | .slot_reset = igb_io_slot_reset, | |
222 | .resume = igb_io_resume, | |
223 | }; | |
224 | ||
b6e0c419 | 225 | static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); |
9d5c8243 AK |
226 | |
227 | static struct pci_driver igb_driver = { | |
228 | .name = igb_driver_name, | |
229 | .id_table = igb_pci_tbl, | |
230 | .probe = igb_probe, | |
9f9a12f8 | 231 | .remove = igb_remove, |
9d5c8243 | 232 | #ifdef CONFIG_PM |
749ab2cd | 233 | .driver.pm = &igb_pm_ops, |
9d5c8243 AK |
234 | #endif |
235 | .shutdown = igb_shutdown, | |
fa44f2f1 | 236 | .sriov_configure = igb_pci_sriov_configure, |
9d5c8243 AK |
237 | .err_handler = &igb_err_handler |
238 | }; | |
239 | ||
240 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | |
241 | MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); | |
98674ebe | 242 | MODULE_LICENSE("GPL v2"); |
9d5c8243 AK |
243 | MODULE_VERSION(DRV_VERSION); |
244 | ||
b3f4d599 | 245 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) |
246 | static int debug = -1; | |
247 | module_param(debug, int, 0); | |
248 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | |
249 | ||
c97ec42a TI |
250 | struct igb_reg_info { |
251 | u32 ofs; | |
252 | char *name; | |
253 | }; | |
254 | ||
255 | static const struct igb_reg_info igb_reg_info_tbl[] = { | |
256 | ||
257 | /* General Registers */ | |
258 | {E1000_CTRL, "CTRL"}, | |
259 | {E1000_STATUS, "STATUS"}, | |
260 | {E1000_CTRL_EXT, "CTRL_EXT"}, | |
261 | ||
262 | /* Interrupt Registers */ | |
263 | {E1000_ICR, "ICR"}, | |
264 | ||
265 | /* RX Registers */ | |
266 | {E1000_RCTL, "RCTL"}, | |
267 | {E1000_RDLEN(0), "RDLEN"}, | |
268 | {E1000_RDH(0), "RDH"}, | |
269 | {E1000_RDT(0), "RDT"}, | |
270 | {E1000_RXDCTL(0), "RXDCTL"}, | |
271 | {E1000_RDBAL(0), "RDBAL"}, | |
272 | {E1000_RDBAH(0), "RDBAH"}, | |
273 | ||
274 | /* TX Registers */ | |
275 | {E1000_TCTL, "TCTL"}, | |
276 | {E1000_TDBAL(0), "TDBAL"}, | |
277 | {E1000_TDBAH(0), "TDBAH"}, | |
278 | {E1000_TDLEN(0), "TDLEN"}, | |
279 | {E1000_TDH(0), "TDH"}, | |
280 | {E1000_TDT(0), "TDT"}, | |
281 | {E1000_TXDCTL(0), "TXDCTL"}, | |
282 | {E1000_TDFH, "TDFH"}, | |
283 | {E1000_TDFT, "TDFT"}, | |
284 | {E1000_TDFHS, "TDFHS"}, | |
285 | {E1000_TDFPC, "TDFPC"}, | |
286 | ||
287 | /* List Terminator */ | |
288 | {} | |
289 | }; | |
290 | ||
b980ac18 | 291 | /* igb_regdump - register printout routine */ |
c97ec42a TI |
292 | static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) |
293 | { | |
294 | int n = 0; | |
295 | char rname[16]; | |
296 | u32 regs[8]; | |
297 | ||
298 | switch (reginfo->ofs) { | |
299 | case E1000_RDLEN(0): | |
300 | for (n = 0; n < 4; n++) | |
301 | regs[n] = rd32(E1000_RDLEN(n)); | |
302 | break; | |
303 | case E1000_RDH(0): | |
304 | for (n = 0; n < 4; n++) | |
305 | regs[n] = rd32(E1000_RDH(n)); | |
306 | break; | |
307 | case E1000_RDT(0): | |
308 | for (n = 0; n < 4; n++) | |
309 | regs[n] = rd32(E1000_RDT(n)); | |
310 | break; | |
311 | case E1000_RXDCTL(0): | |
312 | for (n = 0; n < 4; n++) | |
313 | regs[n] = rd32(E1000_RXDCTL(n)); | |
314 | break; | |
315 | case E1000_RDBAL(0): | |
316 | for (n = 0; n < 4; n++) | |
317 | regs[n] = rd32(E1000_RDBAL(n)); | |
318 | break; | |
319 | case E1000_RDBAH(0): | |
320 | for (n = 0; n < 4; n++) | |
321 | regs[n] = rd32(E1000_RDBAH(n)); | |
322 | break; | |
323 | case E1000_TDBAL(0): | |
324 | for (n = 0; n < 4; n++) | |
325 | regs[n] = rd32(E1000_RDBAL(n)); | |
326 | break; | |
327 | case E1000_TDBAH(0): | |
328 | for (n = 0; n < 4; n++) | |
329 | regs[n] = rd32(E1000_TDBAH(n)); | |
330 | break; | |
331 | case E1000_TDLEN(0): | |
332 | for (n = 0; n < 4; n++) | |
333 | regs[n] = rd32(E1000_TDLEN(n)); | |
334 | break; | |
335 | case E1000_TDH(0): | |
336 | for (n = 0; n < 4; n++) | |
337 | regs[n] = rd32(E1000_TDH(n)); | |
338 | break; | |
339 | case E1000_TDT(0): | |
340 | for (n = 0; n < 4; n++) | |
341 | regs[n] = rd32(E1000_TDT(n)); | |
342 | break; | |
343 | case E1000_TXDCTL(0): | |
344 | for (n = 0; n < 4; n++) | |
345 | regs[n] = rd32(E1000_TXDCTL(n)); | |
346 | break; | |
347 | default: | |
876d2d6f | 348 | pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); |
c97ec42a TI |
349 | return; |
350 | } | |
351 | ||
352 | snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); | |
876d2d6f JK |
353 | pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], |
354 | regs[2], regs[3]); | |
c97ec42a TI |
355 | } |
356 | ||
b980ac18 | 357 | /* igb_dump - Print registers, Tx-rings and Rx-rings */ |
c97ec42a TI |
358 | static void igb_dump(struct igb_adapter *adapter) |
359 | { | |
360 | struct net_device *netdev = adapter->netdev; | |
361 | struct e1000_hw *hw = &adapter->hw; | |
362 | struct igb_reg_info *reginfo; | |
c97ec42a TI |
363 | struct igb_ring *tx_ring; |
364 | union e1000_adv_tx_desc *tx_desc; | |
365 | struct my_u0 { u64 a; u64 b; } *u0; | |
c97ec42a TI |
366 | struct igb_ring *rx_ring; |
367 | union e1000_adv_rx_desc *rx_desc; | |
368 | u32 staterr; | |
6ad4edfc | 369 | u16 i, n; |
c97ec42a TI |
370 | |
371 | if (!netif_msg_hw(adapter)) | |
372 | return; | |
373 | ||
374 | /* Print netdevice Info */ | |
375 | if (netdev) { | |
376 | dev_info(&adapter->pdev->dev, "Net device Info\n"); | |
4a7c9726 TK |
377 | pr_info("Device Name state trans_start\n"); |
378 | pr_info("%-15s %016lX %016lX\n", netdev->name, | |
379 | netdev->state, dev_trans_start(netdev)); | |
c97ec42a TI |
380 | } |
381 | ||
382 | /* Print Registers */ | |
383 | dev_info(&adapter->pdev->dev, "Register Dump\n"); | |
876d2d6f | 384 | pr_info(" Register Name Value\n"); |
c97ec42a TI |
385 | for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; |
386 | reginfo->name; reginfo++) { | |
387 | igb_regdump(hw, reginfo); | |
388 | } | |
389 | ||
390 | /* Print TX Ring Summary */ | |
391 | if (!netdev || !netif_running(netdev)) | |
392 | goto exit; | |
393 | ||
394 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); | |
876d2d6f | 395 | pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); |
c97ec42a | 396 | for (n = 0; n < adapter->num_tx_queues; n++) { |
06034649 | 397 | struct igb_tx_buffer *buffer_info; |
c97ec42a | 398 | tx_ring = adapter->tx_ring[n]; |
06034649 | 399 | buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; |
876d2d6f JK |
400 | pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", |
401 | n, tx_ring->next_to_use, tx_ring->next_to_clean, | |
c9f14bf3 AD |
402 | (u64)dma_unmap_addr(buffer_info, dma), |
403 | dma_unmap_len(buffer_info, len), | |
876d2d6f JK |
404 | buffer_info->next_to_watch, |
405 | (u64)buffer_info->time_stamp); | |
c97ec42a TI |
406 | } |
407 | ||
408 | /* Print TX Rings */ | |
409 | if (!netif_msg_tx_done(adapter)) | |
410 | goto rx_ring_summary; | |
411 | ||
412 | dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); | |
413 | ||
414 | /* Transmit Descriptor Formats | |
415 | * | |
416 | * Advanced Transmit Descriptor | |
417 | * +--------------------------------------------------------------+ | |
418 | * 0 | Buffer Address [63:0] | | |
419 | * +--------------------------------------------------------------+ | |
420 | * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | | |
421 | * +--------------------------------------------------------------+ | |
422 | * 63 46 45 40 39 38 36 35 32 31 24 15 0 | |
423 | */ | |
424 | ||
425 | for (n = 0; n < adapter->num_tx_queues; n++) { | |
426 | tx_ring = adapter->tx_ring[n]; | |
876d2d6f JK |
427 | pr_info("------------------------------------\n"); |
428 | pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); | |
429 | pr_info("------------------------------------\n"); | |
c75c4edf | 430 | pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); |
c97ec42a TI |
431 | |
432 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | |
876d2d6f | 433 | const char *next_desc; |
06034649 | 434 | struct igb_tx_buffer *buffer_info; |
60136906 | 435 | tx_desc = IGB_TX_DESC(tx_ring, i); |
06034649 | 436 | buffer_info = &tx_ring->tx_buffer_info[i]; |
c97ec42a | 437 | u0 = (struct my_u0 *)tx_desc; |
876d2d6f JK |
438 | if (i == tx_ring->next_to_use && |
439 | i == tx_ring->next_to_clean) | |
440 | next_desc = " NTC/U"; | |
441 | else if (i == tx_ring->next_to_use) | |
442 | next_desc = " NTU"; | |
443 | else if (i == tx_ring->next_to_clean) | |
444 | next_desc = " NTC"; | |
445 | else | |
446 | next_desc = ""; | |
447 | ||
c75c4edf CW |
448 | pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", |
449 | i, le64_to_cpu(u0->a), | |
c97ec42a | 450 | le64_to_cpu(u0->b), |
c9f14bf3 AD |
451 | (u64)dma_unmap_addr(buffer_info, dma), |
452 | dma_unmap_len(buffer_info, len), | |
c97ec42a TI |
453 | buffer_info->next_to_watch, |
454 | (u64)buffer_info->time_stamp, | |
876d2d6f | 455 | buffer_info->skb, next_desc); |
c97ec42a | 456 | |
b669588a | 457 | if (netif_msg_pktdata(adapter) && buffer_info->skb) |
c97ec42a TI |
458 | print_hex_dump(KERN_INFO, "", |
459 | DUMP_PREFIX_ADDRESS, | |
b669588a | 460 | 16, 1, buffer_info->skb->data, |
c9f14bf3 AD |
461 | dma_unmap_len(buffer_info, len), |
462 | true); | |
c97ec42a TI |
463 | } |
464 | } | |
465 | ||
466 | /* Print RX Rings Summary */ | |
467 | rx_ring_summary: | |
468 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); | |
876d2d6f | 469 | pr_info("Queue [NTU] [NTC]\n"); |
c97ec42a TI |
470 | for (n = 0; n < adapter->num_rx_queues; n++) { |
471 | rx_ring = adapter->rx_ring[n]; | |
876d2d6f JK |
472 | pr_info(" %5d %5X %5X\n", |
473 | n, rx_ring->next_to_use, rx_ring->next_to_clean); | |
c97ec42a TI |
474 | } |
475 | ||
476 | /* Print RX Rings */ | |
477 | if (!netif_msg_rx_status(adapter)) | |
478 | goto exit; | |
479 | ||
480 | dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); | |
481 | ||
482 | /* Advanced Receive Descriptor (Read) Format | |
483 | * 63 1 0 | |
484 | * +-----------------------------------------------------+ | |
485 | * 0 | Packet Buffer Address [63:1] |A0/NSE| | |
486 | * +----------------------------------------------+------+ | |
487 | * 8 | Header Buffer Address [63:1] | DD | | |
488 | * +-----------------------------------------------------+ | |
489 | * | |
490 | * | |
491 | * Advanced Receive Descriptor (Write-Back) Format | |
492 | * | |
493 | * 63 48 47 32 31 30 21 20 17 16 4 3 0 | |
494 | * +------------------------------------------------------+ | |
495 | * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | | |
496 | * | Checksum Ident | | | | Type | Type | | |
497 | * +------------------------------------------------------+ | |
498 | * 8 | VLAN Tag | Length | Extended Error | Extended Status | | |
499 | * +------------------------------------------------------+ | |
500 | * 63 48 47 32 31 20 19 0 | |
501 | */ | |
502 | ||
503 | for (n = 0; n < adapter->num_rx_queues; n++) { | |
504 | rx_ring = adapter->rx_ring[n]; | |
876d2d6f JK |
505 | pr_info("------------------------------------\n"); |
506 | pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); | |
507 | pr_info("------------------------------------\n"); | |
c75c4edf CW |
508 | pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); |
509 | pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); | |
c97ec42a TI |
510 | |
511 | for (i = 0; i < rx_ring->count; i++) { | |
876d2d6f | 512 | const char *next_desc; |
06034649 AD |
513 | struct igb_rx_buffer *buffer_info; |
514 | buffer_info = &rx_ring->rx_buffer_info[i]; | |
60136906 | 515 | rx_desc = IGB_RX_DESC(rx_ring, i); |
c97ec42a TI |
516 | u0 = (struct my_u0 *)rx_desc; |
517 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | |
876d2d6f JK |
518 | |
519 | if (i == rx_ring->next_to_use) | |
520 | next_desc = " NTU"; | |
521 | else if (i == rx_ring->next_to_clean) | |
522 | next_desc = " NTC"; | |
523 | else | |
524 | next_desc = ""; | |
525 | ||
c97ec42a TI |
526 | if (staterr & E1000_RXD_STAT_DD) { |
527 | /* Descriptor Done */ | |
1a1c225b AD |
528 | pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", |
529 | "RWB", i, | |
c97ec42a TI |
530 | le64_to_cpu(u0->a), |
531 | le64_to_cpu(u0->b), | |
1a1c225b | 532 | next_desc); |
c97ec42a | 533 | } else { |
1a1c225b AD |
534 | pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", |
535 | "R ", i, | |
c97ec42a TI |
536 | le64_to_cpu(u0->a), |
537 | le64_to_cpu(u0->b), | |
538 | (u64)buffer_info->dma, | |
1a1c225b | 539 | next_desc); |
c97ec42a | 540 | |
b669588a | 541 | if (netif_msg_pktdata(adapter) && |
1a1c225b | 542 | buffer_info->dma && buffer_info->page) { |
44390ca6 AD |
543 | print_hex_dump(KERN_INFO, "", |
544 | DUMP_PREFIX_ADDRESS, | |
545 | 16, 1, | |
b669588a ET |
546 | page_address(buffer_info->page) + |
547 | buffer_info->page_offset, | |
8649aaef | 548 | igb_rx_bufsz(rx_ring), true); |
c97ec42a TI |
549 | } |
550 | } | |
c97ec42a TI |
551 | } |
552 | } | |
553 | ||
554 | exit: | |
555 | return; | |
556 | } | |
557 | ||
b980ac18 JK |
558 | /** |
559 | * igb_get_i2c_data - Reads the I2C SDA data bit | |
441fc6fd CW |
560 | * @hw: pointer to hardware structure |
561 | * @i2cctl: Current value of I2CCTL register | |
562 | * | |
563 | * Returns the I2C data bit value | |
b980ac18 | 564 | **/ |
441fc6fd CW |
565 | static int igb_get_i2c_data(void *data) |
566 | { | |
567 | struct igb_adapter *adapter = (struct igb_adapter *)data; | |
568 | struct e1000_hw *hw = &adapter->hw; | |
569 | s32 i2cctl = rd32(E1000_I2CPARAMS); | |
570 | ||
da1f1dfe | 571 | return !!(i2cctl & E1000_I2C_DATA_IN); |
441fc6fd CW |
572 | } |
573 | ||
b980ac18 JK |
574 | /** |
575 | * igb_set_i2c_data - Sets the I2C data bit | |
441fc6fd CW |
576 | * @data: pointer to hardware structure |
577 | * @state: I2C data value (0 or 1) to set | |
578 | * | |
579 | * Sets the I2C data bit | |
b980ac18 | 580 | **/ |
441fc6fd CW |
581 | static void igb_set_i2c_data(void *data, int state) |
582 | { | |
583 | struct igb_adapter *adapter = (struct igb_adapter *)data; | |
584 | struct e1000_hw *hw = &adapter->hw; | |
585 | s32 i2cctl = rd32(E1000_I2CPARAMS); | |
586 | ||
587 | if (state) | |
588 | i2cctl |= E1000_I2C_DATA_OUT; | |
589 | else | |
590 | i2cctl &= ~E1000_I2C_DATA_OUT; | |
591 | ||
592 | i2cctl &= ~E1000_I2C_DATA_OE_N; | |
593 | i2cctl |= E1000_I2C_CLK_OE_N; | |
594 | wr32(E1000_I2CPARAMS, i2cctl); | |
595 | wrfl(); | |
596 | ||
597 | } | |
598 | ||
b980ac18 JK |
599 | /** |
600 | * igb_set_i2c_clk - Sets the I2C SCL clock | |
441fc6fd CW |
601 | * @data: pointer to hardware structure |
602 | * @state: state to set clock | |
603 | * | |
604 | * Sets the I2C clock line to state | |
b980ac18 | 605 | **/ |
441fc6fd CW |
606 | static void igb_set_i2c_clk(void *data, int state) |
607 | { | |
608 | struct igb_adapter *adapter = (struct igb_adapter *)data; | |
609 | struct e1000_hw *hw = &adapter->hw; | |
610 | s32 i2cctl = rd32(E1000_I2CPARAMS); | |
611 | ||
612 | if (state) { | |
613 | i2cctl |= E1000_I2C_CLK_OUT; | |
614 | i2cctl &= ~E1000_I2C_CLK_OE_N; | |
615 | } else { | |
616 | i2cctl &= ~E1000_I2C_CLK_OUT; | |
617 | i2cctl &= ~E1000_I2C_CLK_OE_N; | |
618 | } | |
619 | wr32(E1000_I2CPARAMS, i2cctl); | |
620 | wrfl(); | |
621 | } | |
622 | ||
b980ac18 JK |
623 | /** |
624 | * igb_get_i2c_clk - Gets the I2C SCL clock state | |
441fc6fd CW |
625 | * @data: pointer to hardware structure |
626 | * | |
627 | * Gets the I2C clock state | |
b980ac18 | 628 | **/ |
441fc6fd CW |
629 | static int igb_get_i2c_clk(void *data) |
630 | { | |
631 | struct igb_adapter *adapter = (struct igb_adapter *)data; | |
632 | struct e1000_hw *hw = &adapter->hw; | |
633 | s32 i2cctl = rd32(E1000_I2CPARAMS); | |
634 | ||
da1f1dfe | 635 | return !!(i2cctl & E1000_I2C_CLK_IN); |
441fc6fd CW |
636 | } |
637 | ||
638 | static const struct i2c_algo_bit_data igb_i2c_algo = { | |
639 | .setsda = igb_set_i2c_data, | |
640 | .setscl = igb_set_i2c_clk, | |
641 | .getsda = igb_get_i2c_data, | |
642 | .getscl = igb_get_i2c_clk, | |
643 | .udelay = 5, | |
644 | .timeout = 20, | |
645 | }; | |
646 | ||
9d5c8243 | 647 | /** |
b980ac18 JK |
648 | * igb_get_hw_dev - return device |
649 | * @hw: pointer to hardware structure | |
650 | * | |
651 | * used by hardware layer to print debugging information | |
9d5c8243 | 652 | **/ |
c041076a | 653 | struct net_device *igb_get_hw_dev(struct e1000_hw *hw) |
9d5c8243 AK |
654 | { |
655 | struct igb_adapter *adapter = hw->back; | |
c041076a | 656 | return adapter->netdev; |
9d5c8243 | 657 | } |
38c845c7 | 658 | |
9d5c8243 | 659 | /** |
b980ac18 | 660 | * igb_init_module - Driver Registration Routine |
9d5c8243 | 661 | * |
b980ac18 JK |
662 | * igb_init_module is the first routine called when the driver is |
663 | * loaded. All it does is register with the PCI subsystem. | |
9d5c8243 AK |
664 | **/ |
665 | static int __init igb_init_module(void) | |
666 | { | |
667 | int ret; | |
9005df38 | 668 | |
876d2d6f | 669 | pr_info("%s - version %s\n", |
9d5c8243 | 670 | igb_driver_string, igb_driver_version); |
876d2d6f | 671 | pr_info("%s\n", igb_copyright); |
9d5c8243 | 672 | |
421e02f0 | 673 | #ifdef CONFIG_IGB_DCA |
fe4506b6 JC |
674 | dca_register_notify(&dca_notifier); |
675 | #endif | |
bbd98fe4 | 676 | ret = pci_register_driver(&igb_driver); |
9d5c8243 AK |
677 | return ret; |
678 | } | |
679 | ||
680 | module_init(igb_init_module); | |
681 | ||
682 | /** | |
b980ac18 | 683 | * igb_exit_module - Driver Exit Cleanup Routine |
9d5c8243 | 684 | * |
b980ac18 JK |
685 | * igb_exit_module is called just before the driver is removed |
686 | * from memory. | |
9d5c8243 AK |
687 | **/ |
688 | static void __exit igb_exit_module(void) | |
689 | { | |
421e02f0 | 690 | #ifdef CONFIG_IGB_DCA |
fe4506b6 JC |
691 | dca_unregister_notify(&dca_notifier); |
692 | #endif | |
9d5c8243 AK |
693 | pci_unregister_driver(&igb_driver); |
694 | } | |
695 | ||
696 | module_exit(igb_exit_module); | |
697 | ||
26bc19ec AD |
698 | #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) |
699 | /** | |
b980ac18 JK |
700 | * igb_cache_ring_register - Descriptor ring to register mapping |
701 | * @adapter: board private structure to initialize | |
26bc19ec | 702 | * |
b980ac18 JK |
703 | * Once we know the feature-set enabled for the device, we'll cache |
704 | * the register offset the descriptor ring is assigned to. | |
26bc19ec AD |
705 | **/ |
706 | static void igb_cache_ring_register(struct igb_adapter *adapter) | |
707 | { | |
ee1b9f06 | 708 | int i = 0, j = 0; |
047e0030 | 709 | u32 rbase_offset = adapter->vfs_allocated_count; |
26bc19ec AD |
710 | |
711 | switch (adapter->hw.mac.type) { | |
712 | case e1000_82576: | |
713 | /* The queues are allocated for virtualization such that VF 0 | |
714 | * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. | |
715 | * In order to avoid collision we start at the first free queue | |
716 | * and continue consuming queues in the same sequence | |
717 | */ | |
ee1b9f06 | 718 | if (adapter->vfs_allocated_count) { |
a99955fc | 719 | for (; i < adapter->rss_queues; i++) |
3025a446 | 720 | adapter->rx_ring[i]->reg_idx = rbase_offset + |
b980ac18 | 721 | Q_IDX_82576(i); |
ee1b9f06 | 722 | } |
b26141d4 | 723 | /* Fall through */ |
26bc19ec | 724 | case e1000_82575: |
55cac248 | 725 | case e1000_82580: |
d2ba2ed8 | 726 | case e1000_i350: |
ceb5f13b | 727 | case e1000_i354: |
f96a8a0b CW |
728 | case e1000_i210: |
729 | case e1000_i211: | |
b26141d4 | 730 | /* Fall through */ |
26bc19ec | 731 | default: |
ee1b9f06 | 732 | for (; i < adapter->num_rx_queues; i++) |
3025a446 | 733 | adapter->rx_ring[i]->reg_idx = rbase_offset + i; |
ee1b9f06 | 734 | for (; j < adapter->num_tx_queues; j++) |
3025a446 | 735 | adapter->tx_ring[j]->reg_idx = rbase_offset + j; |
26bc19ec AD |
736 | break; |
737 | } | |
738 | } | |
739 | ||
22a8b291 FT |
740 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) |
741 | { | |
742 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); | |
6aa7de05 | 743 | u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
22a8b291 FT |
744 | u32 value = 0; |
745 | ||
746 | if (E1000_REMOVED(hw_addr)) | |
747 | return ~value; | |
748 | ||
749 | value = readl(&hw_addr[reg]); | |
750 | ||
751 | /* reads should not return all F's */ | |
752 | if (!(~value) && (!reg || !(~readl(hw_addr)))) { | |
753 | struct net_device *netdev = igb->netdev; | |
754 | hw->hw_addr = NULL; | |
17a0b9ad | 755 | netdev_err(netdev, "PCIe link lost\n"); |
94bc1e52 LP |
756 | WARN(pci_device_is_present(igb->pdev), |
757 | "igb: Failed to read reg 0x%x!\n", reg); | |
22a8b291 FT |
758 | } |
759 | ||
760 | return value; | |
761 | } | |
762 | ||
4be000c8 AD |
763 | /** |
764 | * igb_write_ivar - configure ivar for given MSI-X vector | |
765 | * @hw: pointer to the HW structure | |
766 | * @msix_vector: vector number we are allocating to a given ring | |
767 | * @index: row index of IVAR register to write within IVAR table | |
768 | * @offset: column offset of in IVAR, should be multiple of 8 | |
769 | * | |
770 | * This function is intended to handle the writing of the IVAR register | |
771 | * for adapters 82576 and newer. The IVAR table consists of 2 columns, | |
772 | * each containing an cause allocation for an Rx and Tx ring, and a | |
773 | * variable number of rows depending on the number of queues supported. | |
774 | **/ | |
775 | static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, | |
776 | int index, int offset) | |
777 | { | |
778 | u32 ivar = array_rd32(E1000_IVAR0, index); | |
779 | ||
780 | /* clear any bits that are currently set */ | |
781 | ivar &= ~((u32)0xFF << offset); | |
782 | ||
783 | /* write vector and valid bit */ | |
784 | ivar |= (msix_vector | E1000_IVAR_VALID) << offset; | |
785 | ||
786 | array_wr32(E1000_IVAR0, index, ivar); | |
787 | } | |
788 | ||
9d5c8243 | 789 | #define IGB_N0_QUEUE -1 |
047e0030 | 790 | static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) |
9d5c8243 | 791 | { |
047e0030 | 792 | struct igb_adapter *adapter = q_vector->adapter; |
9d5c8243 | 793 | struct e1000_hw *hw = &adapter->hw; |
047e0030 AD |
794 | int rx_queue = IGB_N0_QUEUE; |
795 | int tx_queue = IGB_N0_QUEUE; | |
4be000c8 | 796 | u32 msixbm = 0; |
047e0030 | 797 | |
0ba82994 AD |
798 | if (q_vector->rx.ring) |
799 | rx_queue = q_vector->rx.ring->reg_idx; | |
800 | if (q_vector->tx.ring) | |
801 | tx_queue = q_vector->tx.ring->reg_idx; | |
2d064c06 AD |
802 | |
803 | switch (hw->mac.type) { | |
804 | case e1000_82575: | |
9d5c8243 | 805 | /* The 82575 assigns vectors using a bitmask, which matches the |
b980ac18 JK |
806 | * bitmask for the EICR/EIMS/EIMC registers. To assign one |
807 | * or more queues to a vector, we write the appropriate bits | |
808 | * into the MSIXBM register for that vector. | |
809 | */ | |
047e0030 | 810 | if (rx_queue > IGB_N0_QUEUE) |
9d5c8243 | 811 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; |
047e0030 | 812 | if (tx_queue > IGB_N0_QUEUE) |
9d5c8243 | 813 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; |
cd14ef54 | 814 | if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) |
feeb2721 | 815 | msixbm |= E1000_EIMS_OTHER; |
9d5c8243 | 816 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); |
047e0030 | 817 | q_vector->eims_value = msixbm; |
2d064c06 AD |
818 | break; |
819 | case e1000_82576: | |
b980ac18 | 820 | /* 82576 uses a table that essentially consists of 2 columns |
4be000c8 AD |
821 | * with 8 rows. The ordering is column-major so we use the |
822 | * lower 3 bits as the row index, and the 4th bit as the | |
823 | * column offset. | |
824 | */ | |
825 | if (rx_queue > IGB_N0_QUEUE) | |
826 | igb_write_ivar(hw, msix_vector, | |
827 | rx_queue & 0x7, | |
828 | (rx_queue & 0x8) << 1); | |
829 | if (tx_queue > IGB_N0_QUEUE) | |
830 | igb_write_ivar(hw, msix_vector, | |
831 | tx_queue & 0x7, | |
832 | ((tx_queue & 0x8) << 1) + 8); | |
a51d8c21 | 833 | q_vector->eims_value = BIT(msix_vector); |
2d064c06 | 834 | break; |
55cac248 | 835 | case e1000_82580: |
d2ba2ed8 | 836 | case e1000_i350: |
ceb5f13b | 837 | case e1000_i354: |
f96a8a0b CW |
838 | case e1000_i210: |
839 | case e1000_i211: | |
b980ac18 | 840 | /* On 82580 and newer adapters the scheme is similar to 82576 |
4be000c8 AD |
841 | * however instead of ordering column-major we have things |
842 | * ordered row-major. So we traverse the table by using | |
843 | * bit 0 as the column offset, and the remaining bits as the | |
844 | * row index. | |
845 | */ | |
846 | if (rx_queue > IGB_N0_QUEUE) | |
847 | igb_write_ivar(hw, msix_vector, | |
848 | rx_queue >> 1, | |
849 | (rx_queue & 0x1) << 4); | |
850 | if (tx_queue > IGB_N0_QUEUE) | |
851 | igb_write_ivar(hw, msix_vector, | |
852 | tx_queue >> 1, | |
853 | ((tx_queue & 0x1) << 4) + 8); | |
a51d8c21 | 854 | q_vector->eims_value = BIT(msix_vector); |
55cac248 | 855 | break; |
2d064c06 AD |
856 | default: |
857 | BUG(); | |
858 | break; | |
859 | } | |
26b39276 AD |
860 | |
861 | /* add q_vector eims value to global eims_enable_mask */ | |
862 | adapter->eims_enable_mask |= q_vector->eims_value; | |
863 | ||
864 | /* configure q_vector to set itr on first interrupt */ | |
865 | q_vector->set_itr = 1; | |
9d5c8243 AK |
866 | } |
867 | ||
868 | /** | |
b980ac18 JK |
869 | * igb_configure_msix - Configure MSI-X hardware |
870 | * @adapter: board private structure to initialize | |
9d5c8243 | 871 | * |
b980ac18 JK |
872 | * igb_configure_msix sets up the hardware to properly |
873 | * generate MSI-X interrupts. | |
9d5c8243 AK |
874 | **/ |
875 | static void igb_configure_msix(struct igb_adapter *adapter) | |
876 | { | |
877 | u32 tmp; | |
878 | int i, vector = 0; | |
879 | struct e1000_hw *hw = &adapter->hw; | |
880 | ||
881 | adapter->eims_enable_mask = 0; | |
9d5c8243 AK |
882 | |
883 | /* set vector for other causes, i.e. link changes */ | |
2d064c06 AD |
884 | switch (hw->mac.type) { |
885 | case e1000_82575: | |
9d5c8243 AK |
886 | tmp = rd32(E1000_CTRL_EXT); |
887 | /* enable MSI-X PBA support*/ | |
888 | tmp |= E1000_CTRL_EXT_PBA_CLR; | |
889 | ||
890 | /* Auto-Mask interrupts upon ICR read. */ | |
891 | tmp |= E1000_CTRL_EXT_EIAME; | |
892 | tmp |= E1000_CTRL_EXT_IRCA; | |
893 | ||
894 | wr32(E1000_CTRL_EXT, tmp); | |
047e0030 AD |
895 | |
896 | /* enable msix_other interrupt */ | |
b980ac18 | 897 | array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); |
844290e5 | 898 | adapter->eims_other = E1000_EIMS_OTHER; |
9d5c8243 | 899 | |
2d064c06 AD |
900 | break; |
901 | ||
902 | case e1000_82576: | |
55cac248 | 903 | case e1000_82580: |
d2ba2ed8 | 904 | case e1000_i350: |
ceb5f13b | 905 | case e1000_i354: |
f96a8a0b CW |
906 | case e1000_i210: |
907 | case e1000_i211: | |
047e0030 | 908 | /* Turn on MSI-X capability first, or our settings |
b980ac18 JK |
909 | * won't stick. And it will take days to debug. |
910 | */ | |
047e0030 | 911 | wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | |
b980ac18 JK |
912 | E1000_GPIE_PBA | E1000_GPIE_EIAME | |
913 | E1000_GPIE_NSICR); | |
047e0030 AD |
914 | |
915 | /* enable msix_other interrupt */ | |
a51d8c21 | 916 | adapter->eims_other = BIT(vector); |
2d064c06 | 917 | tmp = (vector++ | E1000_IVAR_VALID) << 8; |
2d064c06 | 918 | |
047e0030 | 919 | wr32(E1000_IVAR_MISC, tmp); |
2d064c06 AD |
920 | break; |
921 | default: | |
922 | /* do nothing, since nothing else supports MSI-X */ | |
923 | break; | |
924 | } /* switch (hw->mac.type) */ | |
047e0030 AD |
925 | |
926 | adapter->eims_enable_mask |= adapter->eims_other; | |
927 | ||
26b39276 AD |
928 | for (i = 0; i < adapter->num_q_vectors; i++) |
929 | igb_assign_vector(adapter->q_vector[i], vector++); | |
047e0030 | 930 | |
9d5c8243 AK |
931 | wrfl(); |
932 | } | |
933 | ||
934 | /** | |
b980ac18 JK |
935 | * igb_request_msix - Initialize MSI-X interrupts |
936 | * @adapter: board private structure to initialize | |
9d5c8243 | 937 | * |
b980ac18 JK |
938 | * igb_request_msix allocates MSI-X vectors and requests interrupts from the |
939 | * kernel. | |
9d5c8243 AK |
940 | **/ |
941 | static int igb_request_msix(struct igb_adapter *adapter) | |
942 | { | |
943 | struct net_device *netdev = adapter->netdev; | |
52285b76 | 944 | int i, err = 0, vector = 0, free_vector = 0; |
9d5c8243 | 945 | |
047e0030 | 946 | err = request_irq(adapter->msix_entries[vector].vector, |
b980ac18 | 947 | igb_msix_other, 0, netdev->name, adapter); |
047e0030 | 948 | if (err) |
52285b76 | 949 | goto err_out; |
047e0030 AD |
950 | |
951 | for (i = 0; i < adapter->num_q_vectors; i++) { | |
952 | struct igb_q_vector *q_vector = adapter->q_vector[i]; | |
953 | ||
52285b76 SA |
954 | vector++; |
955 | ||
7b06a690 | 956 | q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); |
047e0030 | 957 | |
0ba82994 | 958 | if (q_vector->rx.ring && q_vector->tx.ring) |
047e0030 | 959 | sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, |
0ba82994 AD |
960 | q_vector->rx.ring->queue_index); |
961 | else if (q_vector->tx.ring) | |
047e0030 | 962 | sprintf(q_vector->name, "%s-tx-%u", netdev->name, |
0ba82994 AD |
963 | q_vector->tx.ring->queue_index); |
964 | else if (q_vector->rx.ring) | |
047e0030 | 965 | sprintf(q_vector->name, "%s-rx-%u", netdev->name, |
0ba82994 | 966 | q_vector->rx.ring->queue_index); |
9d5c8243 | 967 | else |
047e0030 AD |
968 | sprintf(q_vector->name, "%s-unused", netdev->name); |
969 | ||
9d5c8243 | 970 | err = request_irq(adapter->msix_entries[vector].vector, |
b980ac18 JK |
971 | igb_msix_ring, 0, q_vector->name, |
972 | q_vector); | |
9d5c8243 | 973 | if (err) |
52285b76 | 974 | goto err_free; |
9d5c8243 AK |
975 | } |
976 | ||
9d5c8243 AK |
977 | igb_configure_msix(adapter); |
978 | return 0; | |
52285b76 SA |
979 | |
980 | err_free: | |
981 | /* free already assigned IRQs */ | |
982 | free_irq(adapter->msix_entries[free_vector++].vector, adapter); | |
983 | ||
984 | vector--; | |
985 | for (i = 0; i < vector; i++) { | |
986 | free_irq(adapter->msix_entries[free_vector++].vector, | |
987 | adapter->q_vector[i]); | |
988 | } | |
989 | err_out: | |
9d5c8243 AK |
990 | return err; |
991 | } | |
992 | ||
5536d210 | 993 | /** |
b980ac18 JK |
994 | * igb_free_q_vector - Free memory allocated for specific interrupt vector |
995 | * @adapter: board private structure to initialize | |
996 | * @v_idx: Index of vector to be freed | |
5536d210 | 997 | * |
02ef6e1d | 998 | * This function frees the memory allocated to the q_vector. |
5536d210 AD |
999 | **/ |
1000 | static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) | |
1001 | { | |
1002 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | |
1003 | ||
02ef6e1d CW |
1004 | adapter->q_vector[v_idx] = NULL; |
1005 | ||
1006 | /* igb_get_stats64() might access the rings on this vector, | |
1007 | * we must wait a grace period before freeing it. | |
1008 | */ | |
17a402a0 CW |
1009 | if (q_vector) |
1010 | kfree_rcu(q_vector, rcu); | |
02ef6e1d CW |
1011 | } |
1012 | ||
1013 | /** | |
1014 | * igb_reset_q_vector - Reset config for interrupt vector | |
1015 | * @adapter: board private structure to initialize | |
1016 | * @v_idx: Index of vector to be reset | |
1017 | * | |
1018 | * If NAPI is enabled it will delete any references to the | |
1019 | * NAPI struct. This is preparation for igb_free_q_vector. | |
1020 | **/ | |
1021 | static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) | |
1022 | { | |
1023 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | |
1024 | ||
cb06d102 CP |
1025 | /* Coming from igb_set_interrupt_capability, the vectors are not yet |
1026 | * allocated. So, q_vector is NULL so we should stop here. | |
1027 | */ | |
1028 | if (!q_vector) | |
1029 | return; | |
1030 | ||
5536d210 AD |
1031 | if (q_vector->tx.ring) |
1032 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; | |
1033 | ||
1034 | if (q_vector->rx.ring) | |
2439fc4d | 1035 | adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; |
5536d210 | 1036 | |
5536d210 AD |
1037 | netif_napi_del(&q_vector->napi); |
1038 | ||
02ef6e1d CW |
1039 | } |
1040 | ||
1041 | static void igb_reset_interrupt_capability(struct igb_adapter *adapter) | |
1042 | { | |
1043 | int v_idx = adapter->num_q_vectors; | |
1044 | ||
cd14ef54 | 1045 | if (adapter->flags & IGB_FLAG_HAS_MSIX) |
02ef6e1d | 1046 | pci_disable_msix(adapter->pdev); |
cd14ef54 | 1047 | else if (adapter->flags & IGB_FLAG_HAS_MSI) |
02ef6e1d | 1048 | pci_disable_msi(adapter->pdev); |
02ef6e1d CW |
1049 | |
1050 | while (v_idx--) | |
1051 | igb_reset_q_vector(adapter, v_idx); | |
5536d210 AD |
1052 | } |
1053 | ||
047e0030 | 1054 | /** |
b980ac18 JK |
1055 | * igb_free_q_vectors - Free memory allocated for interrupt vectors |
1056 | * @adapter: board private structure to initialize | |
047e0030 | 1057 | * |
b980ac18 JK |
1058 | * This function frees the memory allocated to the q_vectors. In addition if |
1059 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1060 | * to freeing the q_vector. | |
047e0030 AD |
1061 | **/ |
1062 | static void igb_free_q_vectors(struct igb_adapter *adapter) | |
1063 | { | |
5536d210 AD |
1064 | int v_idx = adapter->num_q_vectors; |
1065 | ||
1066 | adapter->num_tx_queues = 0; | |
1067 | adapter->num_rx_queues = 0; | |
047e0030 | 1068 | adapter->num_q_vectors = 0; |
5536d210 | 1069 | |
02ef6e1d CW |
1070 | while (v_idx--) { |
1071 | igb_reset_q_vector(adapter, v_idx); | |
5536d210 | 1072 | igb_free_q_vector(adapter, v_idx); |
02ef6e1d | 1073 | } |
047e0030 AD |
1074 | } |
1075 | ||
1076 | /** | |
b980ac18 JK |
1077 | * igb_clear_interrupt_scheme - reset the device to a state of no interrupts |
1078 | * @adapter: board private structure to initialize | |
047e0030 | 1079 | * |
b980ac18 JK |
1080 | * This function resets the device so that it has 0 Rx queues, Tx queues, and |
1081 | * MSI-X interrupts allocated. | |
047e0030 AD |
1082 | */ |
1083 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) | |
1084 | { | |
047e0030 AD |
1085 | igb_free_q_vectors(adapter); |
1086 | igb_reset_interrupt_capability(adapter); | |
1087 | } | |
9d5c8243 AK |
1088 | |
1089 | /** | |
b980ac18 JK |
1090 | * igb_set_interrupt_capability - set MSI or MSI-X if supported |
1091 | * @adapter: board private structure to initialize | |
1092 | * @msix: boolean value of MSIX capability | |
9d5c8243 | 1093 | * |
b980ac18 JK |
1094 | * Attempt to configure interrupts using the best available |
1095 | * capabilities of the hardware and kernel. | |
9d5c8243 | 1096 | **/ |
53c7d064 | 1097 | static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) |
9d5c8243 AK |
1098 | { |
1099 | int err; | |
1100 | int numvecs, i; | |
1101 | ||
53c7d064 SA |
1102 | if (!msix) |
1103 | goto msi_only; | |
cd14ef54 | 1104 | adapter->flags |= IGB_FLAG_HAS_MSIX; |
53c7d064 | 1105 | |
83b7180d | 1106 | /* Number of supported queues. */ |
a99955fc | 1107 | adapter->num_rx_queues = adapter->rss_queues; |
5fa8517f GR |
1108 | if (adapter->vfs_allocated_count) |
1109 | adapter->num_tx_queues = 1; | |
1110 | else | |
1111 | adapter->num_tx_queues = adapter->rss_queues; | |
83b7180d | 1112 | |
b980ac18 | 1113 | /* start with one vector for every Rx queue */ |
047e0030 AD |
1114 | numvecs = adapter->num_rx_queues; |
1115 | ||
b980ac18 | 1116 | /* if Tx handler is separate add 1 for every Tx queue */ |
a99955fc AD |
1117 | if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) |
1118 | numvecs += adapter->num_tx_queues; | |
047e0030 AD |
1119 | |
1120 | /* store the number of vectors reserved for queues */ | |
1121 | adapter->num_q_vectors = numvecs; | |
1122 | ||
1123 | /* add 1 vector for link status interrupts */ | |
1124 | numvecs++; | |
9d5c8243 AK |
1125 | for (i = 0; i < numvecs; i++) |
1126 | adapter->msix_entries[i].entry = i; | |
1127 | ||
479d02df AG |
1128 | err = pci_enable_msix_range(adapter->pdev, |
1129 | adapter->msix_entries, | |
1130 | numvecs, | |
1131 | numvecs); | |
1132 | if (err > 0) | |
0c2cc02e | 1133 | return; |
9d5c8243 AK |
1134 | |
1135 | igb_reset_interrupt_capability(adapter); | |
1136 | ||
1137 | /* If we can't do MSI-X, try MSI */ | |
1138 | msi_only: | |
b709323d | 1139 | adapter->flags &= ~IGB_FLAG_HAS_MSIX; |
2a3abf6d AD |
1140 | #ifdef CONFIG_PCI_IOV |
1141 | /* disable SR-IOV for non MSI-X configurations */ | |
1142 | if (adapter->vf_data) { | |
1143 | struct e1000_hw *hw = &adapter->hw; | |
1144 | /* disable iov and allow time for transactions to clear */ | |
1145 | pci_disable_sriov(adapter->pdev); | |
1146 | msleep(500); | |
1147 | ||
4827cc37 YK |
1148 | kfree(adapter->vf_mac_list); |
1149 | adapter->vf_mac_list = NULL; | |
2a3abf6d AD |
1150 | kfree(adapter->vf_data); |
1151 | adapter->vf_data = NULL; | |
1152 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | |
945a5151 | 1153 | wrfl(); |
2a3abf6d AD |
1154 | msleep(100); |
1155 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); | |
1156 | } | |
1157 | #endif | |
4fc82adf | 1158 | adapter->vfs_allocated_count = 0; |
a99955fc | 1159 | adapter->rss_queues = 1; |
4fc82adf | 1160 | adapter->flags |= IGB_FLAG_QUEUE_PAIRS; |
9d5c8243 | 1161 | adapter->num_rx_queues = 1; |
661086df | 1162 | adapter->num_tx_queues = 1; |
047e0030 | 1163 | adapter->num_q_vectors = 1; |
9d5c8243 | 1164 | if (!pci_enable_msi(adapter->pdev)) |
7dfc16fa | 1165 | adapter->flags |= IGB_FLAG_HAS_MSI; |
9d5c8243 AK |
1166 | } |
1167 | ||
5536d210 AD |
1168 | static void igb_add_ring(struct igb_ring *ring, |
1169 | struct igb_ring_container *head) | |
1170 | { | |
1171 | head->ring = ring; | |
1172 | head->count++; | |
1173 | } | |
1174 | ||
047e0030 | 1175 | /** |
b980ac18 JK |
1176 | * igb_alloc_q_vector - Allocate memory for a single interrupt vector |
1177 | * @adapter: board private structure to initialize | |
1178 | * @v_count: q_vectors allocated on adapter, used for ring interleaving | |
1179 | * @v_idx: index of vector in adapter struct | |
1180 | * @txr_count: total number of Tx rings to allocate | |
1181 | * @txr_idx: index of first Tx ring to allocate | |
1182 | * @rxr_count: total number of Rx rings to allocate | |
1183 | * @rxr_idx: index of first Rx ring to allocate | |
047e0030 | 1184 | * |
b980ac18 | 1185 | * We allocate one q_vector. If allocation fails we return -ENOMEM. |
047e0030 | 1186 | **/ |
5536d210 AD |
1187 | static int igb_alloc_q_vector(struct igb_adapter *adapter, |
1188 | int v_count, int v_idx, | |
1189 | int txr_count, int txr_idx, | |
1190 | int rxr_count, int rxr_idx) | |
047e0030 AD |
1191 | { |
1192 | struct igb_q_vector *q_vector; | |
5536d210 | 1193 | struct igb_ring *ring; |
a0feac18 GS |
1194 | int ring_count; |
1195 | size_t size; | |
047e0030 | 1196 | |
5536d210 AD |
1197 | /* igb only supports 1 Tx and/or 1 Rx queue per vector */ |
1198 | if (txr_count > 1 || rxr_count > 1) | |
1199 | return -ENOMEM; | |
1200 | ||
1201 | ring_count = txr_count + rxr_count; | |
a0feac18 | 1202 | size = struct_size(q_vector, ring, ring_count); |
5536d210 AD |
1203 | |
1204 | /* allocate q_vector and rings */ | |
02ef6e1d | 1205 | q_vector = adapter->q_vector[v_idx]; |
72ddef05 | 1206 | if (!q_vector) { |
02ef6e1d | 1207 | q_vector = kzalloc(size, GFP_KERNEL); |
72ddef05 SS |
1208 | } else if (size > ksize(q_vector)) { |
1209 | kfree_rcu(q_vector, rcu); | |
1210 | q_vector = kzalloc(size, GFP_KERNEL); | |
1211 | } else { | |
c0a06ee1 | 1212 | memset(q_vector, 0, size); |
72ddef05 | 1213 | } |
5536d210 AD |
1214 | if (!q_vector) |
1215 | return -ENOMEM; | |
1216 | ||
1217 | /* initialize NAPI */ | |
1218 | netif_napi_add(adapter->netdev, &q_vector->napi, | |
1219 | igb_poll, 64); | |
1220 | ||
1221 | /* tie q_vector and adapter together */ | |
1222 | adapter->q_vector[v_idx] = q_vector; | |
1223 | q_vector->adapter = adapter; | |
1224 | ||
1225 | /* initialize work limits */ | |
1226 | q_vector->tx.work_limit = adapter->tx_work_limit; | |
1227 | ||
1228 | /* initialize ITR configuration */ | |
7b06a690 | 1229 | q_vector->itr_register = adapter->io_addr + E1000_EITR(0); |
5536d210 AD |
1230 | q_vector->itr_val = IGB_START_ITR; |
1231 | ||
1232 | /* initialize pointer to rings */ | |
1233 | ring = q_vector->ring; | |
1234 | ||
4e227667 AD |
1235 | /* intialize ITR */ |
1236 | if (rxr_count) { | |
1237 | /* rx or rx/tx vector */ | |
1238 | if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) | |
1239 | q_vector->itr_val = adapter->rx_itr_setting; | |
1240 | } else { | |
1241 | /* tx only vector */ | |
1242 | if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) | |
1243 | q_vector->itr_val = adapter->tx_itr_setting; | |
1244 | } | |
1245 | ||
5536d210 AD |
1246 | if (txr_count) { |
1247 | /* assign generic ring traits */ | |
1248 | ring->dev = &adapter->pdev->dev; | |
1249 | ring->netdev = adapter->netdev; | |
1250 | ||
1251 | /* configure backlink on ring */ | |
1252 | ring->q_vector = q_vector; | |
1253 | ||
1254 | /* update q_vector Tx values */ | |
1255 | igb_add_ring(ring, &q_vector->tx); | |
1256 | ||
1257 | /* For 82575, context index must be unique per ring. */ | |
1258 | if (adapter->hw.mac.type == e1000_82575) | |
1259 | set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); | |
1260 | ||
1261 | /* apply Tx specific ring traits */ | |
1262 | ring->count = adapter->tx_ring_count; | |
1263 | ring->queue_index = txr_idx; | |
1264 | ||
05f9d3e1 AG |
1265 | ring->cbs_enable = false; |
1266 | ring->idleslope = 0; | |
1267 | ring->sendslope = 0; | |
1268 | ring->hicredit = 0; | |
1269 | ring->locredit = 0; | |
1270 | ||
827da44c JS |
1271 | u64_stats_init(&ring->tx_syncp); |
1272 | u64_stats_init(&ring->tx_syncp2); | |
1273 | ||
5536d210 AD |
1274 | /* assign ring to adapter */ |
1275 | adapter->tx_ring[txr_idx] = ring; | |
1276 | ||
1277 | /* push pointer to next ring */ | |
1278 | ring++; | |
047e0030 | 1279 | } |
81c2fc22 | 1280 | |
5536d210 AD |
1281 | if (rxr_count) { |
1282 | /* assign generic ring traits */ | |
1283 | ring->dev = &adapter->pdev->dev; | |
1284 | ring->netdev = adapter->netdev; | |
047e0030 | 1285 | |
5536d210 AD |
1286 | /* configure backlink on ring */ |
1287 | ring->q_vector = q_vector; | |
047e0030 | 1288 | |
5536d210 AD |
1289 | /* update q_vector Rx values */ |
1290 | igb_add_ring(ring, &q_vector->rx); | |
047e0030 | 1291 | |
5536d210 AD |
1292 | /* set flag indicating ring supports SCTP checksum offload */ |
1293 | if (adapter->hw.mac.type >= e1000_82576) | |
1294 | set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); | |
047e0030 | 1295 | |
e52c0f96 | 1296 | /* On i350, i354, i210, and i211, loopback VLAN packets |
5536d210 | 1297 | * have the tag byte-swapped. |
b980ac18 | 1298 | */ |
5536d210 AD |
1299 | if (adapter->hw.mac.type >= e1000_i350) |
1300 | set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); | |
047e0030 | 1301 | |
5536d210 AD |
1302 | /* apply Rx specific ring traits */ |
1303 | ring->count = adapter->rx_ring_count; | |
1304 | ring->queue_index = rxr_idx; | |
1305 | ||
827da44c JS |
1306 | u64_stats_init(&ring->rx_syncp); |
1307 | ||
5536d210 AD |
1308 | /* assign ring to adapter */ |
1309 | adapter->rx_ring[rxr_idx] = ring; | |
1310 | } | |
1311 | ||
1312 | return 0; | |
047e0030 AD |
1313 | } |
1314 | ||
5536d210 | 1315 | |
047e0030 | 1316 | /** |
b980ac18 JK |
1317 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors |
1318 | * @adapter: board private structure to initialize | |
047e0030 | 1319 | * |
b980ac18 JK |
1320 | * We allocate one q_vector per queue interrupt. If allocation fails we |
1321 | * return -ENOMEM. | |
047e0030 | 1322 | **/ |
5536d210 | 1323 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) |
047e0030 | 1324 | { |
5536d210 AD |
1325 | int q_vectors = adapter->num_q_vectors; |
1326 | int rxr_remaining = adapter->num_rx_queues; | |
1327 | int txr_remaining = adapter->num_tx_queues; | |
1328 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | |
1329 | int err; | |
047e0030 | 1330 | |
5536d210 AD |
1331 | if (q_vectors >= (rxr_remaining + txr_remaining)) { |
1332 | for (; rxr_remaining; v_idx++) { | |
1333 | err = igb_alloc_q_vector(adapter, q_vectors, v_idx, | |
1334 | 0, 0, 1, rxr_idx); | |
047e0030 | 1335 | |
5536d210 AD |
1336 | if (err) |
1337 | goto err_out; | |
1338 | ||
1339 | /* update counts and index */ | |
1340 | rxr_remaining--; | |
1341 | rxr_idx++; | |
047e0030 | 1342 | } |
047e0030 | 1343 | } |
5536d210 AD |
1344 | |
1345 | for (; v_idx < q_vectors; v_idx++) { | |
1346 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | |
1347 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | |
9005df38 | 1348 | |
5536d210 AD |
1349 | err = igb_alloc_q_vector(adapter, q_vectors, v_idx, |
1350 | tqpv, txr_idx, rqpv, rxr_idx); | |
1351 | ||
1352 | if (err) | |
1353 | goto err_out; | |
1354 | ||
1355 | /* update counts and index */ | |
1356 | rxr_remaining -= rqpv; | |
1357 | txr_remaining -= tqpv; | |
1358 | rxr_idx++; | |
1359 | txr_idx++; | |
1360 | } | |
1361 | ||
047e0030 | 1362 | return 0; |
5536d210 AD |
1363 | |
1364 | err_out: | |
1365 | adapter->num_tx_queues = 0; | |
1366 | adapter->num_rx_queues = 0; | |
1367 | adapter->num_q_vectors = 0; | |
1368 | ||
1369 | while (v_idx--) | |
1370 | igb_free_q_vector(adapter, v_idx); | |
1371 | ||
1372 | return -ENOMEM; | |
047e0030 AD |
1373 | } |
1374 | ||
1375 | /** | |
b980ac18 JK |
1376 | * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors |
1377 | * @adapter: board private structure to initialize | |
1378 | * @msix: boolean value of MSIX capability | |
047e0030 | 1379 | * |
b980ac18 | 1380 | * This function initializes the interrupts and allocates all of the queues. |
047e0030 | 1381 | **/ |
53c7d064 | 1382 | static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) |
047e0030 AD |
1383 | { |
1384 | struct pci_dev *pdev = adapter->pdev; | |
1385 | int err; | |
1386 | ||
53c7d064 | 1387 | igb_set_interrupt_capability(adapter, msix); |
047e0030 AD |
1388 | |
1389 | err = igb_alloc_q_vectors(adapter); | |
1390 | if (err) { | |
1391 | dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); | |
1392 | goto err_alloc_q_vectors; | |
1393 | } | |
1394 | ||
5536d210 | 1395 | igb_cache_ring_register(adapter); |
047e0030 AD |
1396 | |
1397 | return 0; | |
5536d210 | 1398 | |
047e0030 AD |
1399 | err_alloc_q_vectors: |
1400 | igb_reset_interrupt_capability(adapter); | |
1401 | return err; | |
1402 | } | |
1403 | ||
9d5c8243 | 1404 | /** |
b980ac18 JK |
1405 | * igb_request_irq - initialize interrupts |
1406 | * @adapter: board private structure to initialize | |
9d5c8243 | 1407 | * |
b980ac18 JK |
1408 | * Attempts to configure interrupts using the best available |
1409 | * capabilities of the hardware and kernel. | |
9d5c8243 AK |
1410 | **/ |
1411 | static int igb_request_irq(struct igb_adapter *adapter) | |
1412 | { | |
1413 | struct net_device *netdev = adapter->netdev; | |
047e0030 | 1414 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
1415 | int err = 0; |
1416 | ||
cd14ef54 | 1417 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
9d5c8243 | 1418 | err = igb_request_msix(adapter); |
844290e5 | 1419 | if (!err) |
9d5c8243 | 1420 | goto request_done; |
9d5c8243 | 1421 | /* fall back to MSI */ |
5536d210 AD |
1422 | igb_free_all_tx_resources(adapter); |
1423 | igb_free_all_rx_resources(adapter); | |
53c7d064 | 1424 | |
047e0030 | 1425 | igb_clear_interrupt_scheme(adapter); |
53c7d064 SA |
1426 | err = igb_init_interrupt_scheme(adapter, false); |
1427 | if (err) | |
047e0030 | 1428 | goto request_done; |
53c7d064 | 1429 | |
047e0030 AD |
1430 | igb_setup_all_tx_resources(adapter); |
1431 | igb_setup_all_rx_resources(adapter); | |
53c7d064 | 1432 | igb_configure(adapter); |
9d5c8243 | 1433 | } |
844290e5 | 1434 | |
c74d588e AD |
1435 | igb_assign_vector(adapter->q_vector[0], 0); |
1436 | ||
7dfc16fa | 1437 | if (adapter->flags & IGB_FLAG_HAS_MSI) { |
c74d588e | 1438 | err = request_irq(pdev->irq, igb_intr_msi, 0, |
047e0030 | 1439 | netdev->name, adapter); |
9d5c8243 AK |
1440 | if (!err) |
1441 | goto request_done; | |
047e0030 | 1442 | |
9d5c8243 AK |
1443 | /* fall back to legacy interrupts */ |
1444 | igb_reset_interrupt_capability(adapter); | |
7dfc16fa | 1445 | adapter->flags &= ~IGB_FLAG_HAS_MSI; |
9d5c8243 AK |
1446 | } |
1447 | ||
c74d588e | 1448 | err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, |
047e0030 | 1449 | netdev->name, adapter); |
9d5c8243 | 1450 | |
6cb5e577 | 1451 | if (err) |
c74d588e | 1452 | dev_err(&pdev->dev, "Error %d getting interrupt\n", |
9d5c8243 | 1453 | err); |
9d5c8243 AK |
1454 | |
1455 | request_done: | |
1456 | return err; | |
1457 | } | |
1458 | ||
1459 | static void igb_free_irq(struct igb_adapter *adapter) | |
1460 | { | |
cd14ef54 | 1461 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
9d5c8243 AK |
1462 | int vector = 0, i; |
1463 | ||
047e0030 | 1464 | free_irq(adapter->msix_entries[vector++].vector, adapter); |
9d5c8243 | 1465 | |
0d1ae7f4 | 1466 | for (i = 0; i < adapter->num_q_vectors; i++) |
047e0030 | 1467 | free_irq(adapter->msix_entries[vector++].vector, |
0d1ae7f4 | 1468 | adapter->q_vector[i]); |
047e0030 AD |
1469 | } else { |
1470 | free_irq(adapter->pdev->irq, adapter); | |
9d5c8243 | 1471 | } |
9d5c8243 AK |
1472 | } |
1473 | ||
1474 | /** | |
b980ac18 JK |
1475 | * igb_irq_disable - Mask off interrupt generation on the NIC |
1476 | * @adapter: board private structure | |
9d5c8243 AK |
1477 | **/ |
1478 | static void igb_irq_disable(struct igb_adapter *adapter) | |
1479 | { | |
1480 | struct e1000_hw *hw = &adapter->hw; | |
1481 | ||
b980ac18 | 1482 | /* we need to be careful when disabling interrupts. The VFs are also |
25568a53 AD |
1483 | * mapped into these registers and so clearing the bits can cause |
1484 | * issues on the VF drivers so we only need to clear what we set | |
1485 | */ | |
cd14ef54 | 1486 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
2dfd1212 | 1487 | u32 regval = rd32(E1000_EIAM); |
9005df38 | 1488 | |
2dfd1212 AD |
1489 | wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); |
1490 | wr32(E1000_EIMC, adapter->eims_enable_mask); | |
1491 | regval = rd32(E1000_EIAC); | |
1492 | wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); | |
9d5c8243 | 1493 | } |
844290e5 PW |
1494 | |
1495 | wr32(E1000_IAM, 0); | |
9d5c8243 AK |
1496 | wr32(E1000_IMC, ~0); |
1497 | wrfl(); | |
cd14ef54 | 1498 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
81a61859 | 1499 | int i; |
9005df38 | 1500 | |
81a61859 ET |
1501 | for (i = 0; i < adapter->num_q_vectors; i++) |
1502 | synchronize_irq(adapter->msix_entries[i].vector); | |
1503 | } else { | |
1504 | synchronize_irq(adapter->pdev->irq); | |
1505 | } | |
9d5c8243 AK |
1506 | } |
1507 | ||
1508 | /** | |
b980ac18 JK |
1509 | * igb_irq_enable - Enable default interrupt generation settings |
1510 | * @adapter: board private structure | |
9d5c8243 AK |
1511 | **/ |
1512 | static void igb_irq_enable(struct igb_adapter *adapter) | |
1513 | { | |
1514 | struct e1000_hw *hw = &adapter->hw; | |
1515 | ||
cd14ef54 | 1516 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
06218a8d | 1517 | u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; |
2dfd1212 | 1518 | u32 regval = rd32(E1000_EIAC); |
9005df38 | 1519 | |
2dfd1212 AD |
1520 | wr32(E1000_EIAC, regval | adapter->eims_enable_mask); |
1521 | regval = rd32(E1000_EIAM); | |
1522 | wr32(E1000_EIAM, regval | adapter->eims_enable_mask); | |
844290e5 | 1523 | wr32(E1000_EIMS, adapter->eims_enable_mask); |
25568a53 | 1524 | if (adapter->vfs_allocated_count) { |
4ae196df | 1525 | wr32(E1000_MBVFIMR, 0xFF); |
25568a53 AD |
1526 | ims |= E1000_IMS_VMMB; |
1527 | } | |
1528 | wr32(E1000_IMS, ims); | |
844290e5 | 1529 | } else { |
55cac248 AD |
1530 | wr32(E1000_IMS, IMS_ENABLE_MASK | |
1531 | E1000_IMS_DRSTA); | |
1532 | wr32(E1000_IAM, IMS_ENABLE_MASK | | |
1533 | E1000_IMS_DRSTA); | |
844290e5 | 1534 | } |
9d5c8243 AK |
1535 | } |
1536 | ||
1537 | static void igb_update_mng_vlan(struct igb_adapter *adapter) | |
1538 | { | |
51466239 | 1539 | struct e1000_hw *hw = &adapter->hw; |
8b77c6b2 | 1540 | u16 pf_id = adapter->vfs_allocated_count; |
9d5c8243 AK |
1541 | u16 vid = adapter->hw.mng_cookie.vlan_id; |
1542 | u16 old_vid = adapter->mng_vlan_id; | |
51466239 AD |
1543 | |
1544 | if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | |
1545 | /* add VID to filter table */ | |
8b77c6b2 | 1546 | igb_vfta_set(hw, vid, pf_id, true, true); |
51466239 AD |
1547 | adapter->mng_vlan_id = vid; |
1548 | } else { | |
1549 | adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; | |
1550 | } | |
1551 | ||
1552 | if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && | |
1553 | (vid != old_vid) && | |
b2cb09b1 | 1554 | !test_bit(old_vid, adapter->active_vlans)) { |
51466239 | 1555 | /* remove VID from filter table */ |
8b77c6b2 | 1556 | igb_vfta_set(hw, vid, pf_id, false, true); |
9d5c8243 AK |
1557 | } |
1558 | } | |
1559 | ||
1560 | /** | |
b980ac18 JK |
1561 | * igb_release_hw_control - release control of the h/w to f/w |
1562 | * @adapter: address of board private structure | |
9d5c8243 | 1563 | * |
b980ac18 JK |
1564 | * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. |
1565 | * For ASF and Pass Through versions of f/w this means that the | |
1566 | * driver is no longer loaded. | |
9d5c8243 AK |
1567 | **/ |
1568 | static void igb_release_hw_control(struct igb_adapter *adapter) | |
1569 | { | |
1570 | struct e1000_hw *hw = &adapter->hw; | |
1571 | u32 ctrl_ext; | |
1572 | ||
1573 | /* Let firmware take over control of h/w */ | |
1574 | ctrl_ext = rd32(E1000_CTRL_EXT); | |
1575 | wr32(E1000_CTRL_EXT, | |
1576 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | |
1577 | } | |
1578 | ||
9d5c8243 | 1579 | /** |
b980ac18 JK |
1580 | * igb_get_hw_control - get control of the h/w from f/w |
1581 | * @adapter: address of board private structure | |
9d5c8243 | 1582 | * |
b980ac18 JK |
1583 | * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. |
1584 | * For ASF and Pass Through versions of f/w this means that | |
1585 | * the driver is loaded. | |
9d5c8243 AK |
1586 | **/ |
1587 | static void igb_get_hw_control(struct igb_adapter *adapter) | |
1588 | { | |
1589 | struct e1000_hw *hw = &adapter->hw; | |
1590 | u32 ctrl_ext; | |
1591 | ||
1592 | /* Let firmware know the driver has taken over */ | |
1593 | ctrl_ext = rd32(E1000_CTRL_EXT); | |
1594 | wr32(E1000_CTRL_EXT, | |
1595 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | |
1596 | } | |
1597 | ||
05f9d3e1 AG |
1598 | static void enable_fqtss(struct igb_adapter *adapter, bool enable) |
1599 | { | |
1600 | struct net_device *netdev = adapter->netdev; | |
1601 | struct e1000_hw *hw = &adapter->hw; | |
1602 | ||
1603 | WARN_ON(hw->mac.type != e1000_i210); | |
1604 | ||
1605 | if (enable) | |
1606 | adapter->flags |= IGB_FLAG_FQTSS; | |
1607 | else | |
1608 | adapter->flags &= ~IGB_FLAG_FQTSS; | |
1609 | ||
1610 | if (netif_running(netdev)) | |
1611 | schedule_work(&adapter->reset_task); | |
1612 | } | |
1613 | ||
1614 | static bool is_fqtss_enabled(struct igb_adapter *adapter) | |
1615 | { | |
1616 | return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; | |
1617 | } | |
1618 | ||
1619 | static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, | |
1620 | enum tx_queue_prio prio) | |
1621 | { | |
1622 | u32 val; | |
1623 | ||
1624 | WARN_ON(hw->mac.type != e1000_i210); | |
1625 | WARN_ON(queue < 0 || queue > 4); | |
1626 | ||
1627 | val = rd32(E1000_I210_TXDCTL(queue)); | |
1628 | ||
1629 | if (prio == TX_QUEUE_PRIO_HIGH) | |
1630 | val |= E1000_TXDCTL_PRIORITY; | |
1631 | else | |
1632 | val &= ~E1000_TXDCTL_PRIORITY; | |
1633 | ||
1634 | wr32(E1000_I210_TXDCTL(queue), val); | |
1635 | } | |
1636 | ||
1637 | static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) | |
1638 | { | |
1639 | u32 val; | |
1640 | ||
1641 | WARN_ON(hw->mac.type != e1000_i210); | |
1642 | WARN_ON(queue < 0 || queue > 1); | |
1643 | ||
1644 | val = rd32(E1000_I210_TQAVCC(queue)); | |
1645 | ||
1646 | if (mode == QUEUE_MODE_STREAM_RESERVATION) | |
1647 | val |= E1000_TQAVCC_QUEUEMODE; | |
1648 | else | |
1649 | val &= ~E1000_TQAVCC_QUEUEMODE; | |
1650 | ||
1651 | wr32(E1000_I210_TQAVCC(queue), val); | |
1652 | } | |
1653 | ||
0364a0d0 JSP |
1654 | static bool is_any_cbs_enabled(struct igb_adapter *adapter) |
1655 | { | |
1656 | int i; | |
1657 | ||
1658 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
1659 | if (adapter->tx_ring[i]->cbs_enable) | |
1660 | return true; | |
1661 | } | |
1662 | ||
1663 | return false; | |
1664 | } | |
1665 | ||
3048cf84 JSP |
1666 | static bool is_any_txtime_enabled(struct igb_adapter *adapter) |
1667 | { | |
1668 | int i; | |
1669 | ||
1670 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
1671 | if (adapter->tx_ring[i]->launchtime_enable) | |
1672 | return true; | |
1673 | } | |
1674 | ||
1675 | return false; | |
1676 | } | |
1677 | ||
05f9d3e1 | 1678 | /** |
91db3642 | 1679 | * igb_config_tx_modes - Configure "Qav Tx mode" features on igb |
05f9d3e1 AG |
1680 | * @adapter: pointer to adapter struct |
1681 | * @queue: queue number | |
05f9d3e1 | 1682 | * |
3048cf84 JSP |
1683 | * Configure CBS and Launchtime for a given hardware queue. |
1684 | * Parameters are retrieved from the correct Tx ring, so | |
1685 | * igb_save_cbs_params() and igb_save_txtime_params() should be used | |
91db3642 | 1686 | * for setting those correctly prior to this function being called. |
05f9d3e1 | 1687 | **/ |
91db3642 | 1688 | static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) |
05f9d3e1 | 1689 | { |
91db3642 | 1690 | struct igb_ring *ring = adapter->tx_ring[queue]; |
05f9d3e1 AG |
1691 | struct net_device *netdev = adapter->netdev; |
1692 | struct e1000_hw *hw = &adapter->hw; | |
0364a0d0 | 1693 | u32 tqavcc, tqavctrl; |
05f9d3e1 AG |
1694 | u16 value; |
1695 | ||
1696 | WARN_ON(hw->mac.type != e1000_i210); | |
1697 | WARN_ON(queue < 0 || queue > 1); | |
1698 | ||
3048cf84 JSP |
1699 | /* If any of the Qav features is enabled, configure queues as SR and |
1700 | * with HIGH PRIO. If none is, then configure them with LOW PRIO and | |
1701 | * as SP. | |
1702 | */ | |
1703 | if (ring->cbs_enable || ring->launchtime_enable) { | |
1704 | set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); | |
1705 | set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); | |
1706 | } else { | |
1707 | set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); | |
1708 | set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); | |
1709 | } | |
1710 | ||
1711 | /* If CBS is enabled, set DataTranARB and config its parameters. */ | |
91db3642 | 1712 | if (ring->cbs_enable || queue == 0) { |
2707df97 VCG |
1713 | /* i210 does not allow the queue 0 to be in the Strict |
1714 | * Priority mode while the Qav mode is enabled, so, | |
1715 | * instead of disabling strict priority mode, we give | |
1716 | * queue 0 the maximum of credits possible. | |
1717 | * | |
1718 | * See section 8.12.19 of the i210 datasheet, "Note: | |
1719 | * Queue0 QueueMode must be set to 1b when | |
1720 | * TransmitMode is set to Qav." | |
1721 | */ | |
91db3642 | 1722 | if (queue == 0 && !ring->cbs_enable) { |
2707df97 | 1723 | /* max "linkspeed" idleslope in kbps */ |
91db3642 JSP |
1724 | ring->idleslope = 1000000; |
1725 | ring->hicredit = ETH_FRAME_LEN; | |
2707df97 VCG |
1726 | } |
1727 | ||
0364a0d0 JSP |
1728 | /* Always set data transfer arbitration to credit-based |
1729 | * shaper algorithm on TQAVCTRL if CBS is enabled for any of | |
1730 | * the queues. | |
1731 | */ | |
1732 | tqavctrl = rd32(E1000_I210_TQAVCTRL); | |
1733 | tqavctrl |= E1000_TQAVCTRL_DATATRANARB; | |
1734 | wr32(E1000_I210_TQAVCTRL, tqavctrl); | |
1735 | ||
05f9d3e1 AG |
1736 | /* According to i210 datasheet section 7.2.7.7, we should set |
1737 | * the 'idleSlope' field from TQAVCC register following the | |
1738 | * equation: | |
1739 | * | |
1740 | * For 100 Mbps link speed: | |
1741 | * | |
1742 | * value = BW * 0x7735 * 0.2 (E1) | |
1743 | * | |
1744 | * For 1000Mbps link speed: | |
1745 | * | |
1746 | * value = BW * 0x7735 * 2 (E2) | |
1747 | * | |
1748 | * E1 and E2 can be merged into one equation as shown below. | |
1749 | * Note that 'link-speed' is in Mbps. | |
1750 | * | |
1751 | * value = BW * 0x7735 * 2 * link-speed | |
1752 | * -------------- (E3) | |
1753 | * 1000 | |
1754 | * | |
1755 | * 'BW' is the percentage bandwidth out of full link speed | |
1756 | * which can be found with the following equation. Note that | |
1757 | * idleSlope here is the parameter from this function which | |
1758 | * is in kbps. | |
1759 | * | |
1760 | * BW = idleSlope | |
1761 | * ----------------- (E4) | |
1762 | * link-speed * 1000 | |
1763 | * | |
1764 | * That said, we can come up with a generic equation to | |
1765 | * calculate the value we should set it TQAVCC register by | |
1766 | * replacing 'BW' in E3 by E4. The resulting equation is: | |
1767 | * | |
1768 | * value = idleSlope * 0x7735 * 2 * link-speed | |
1769 | * ----------------- -------------- (E5) | |
1770 | * link-speed * 1000 1000 | |
1771 | * | |
1772 | * 'link-speed' is present in both sides of the fraction so | |
1773 | * it is canceled out. The final equation is the following: | |
1774 | * | |
1775 | * value = idleSlope * 61034 | |
1776 | * ----------------- (E6) | |
1777 | * 1000000 | |
0da6090f JSP |
1778 | * |
1779 | * NOTE: For i210, given the above, we can see that idleslope | |
1780 | * is represented in 16.38431 kbps units by the value at | |
1781 | * the TQAVCC register (1Gbps / 61034), which reduces | |
1782 | * the granularity for idleslope increments. | |
1783 | * For instance, if you want to configure a 2576kbps | |
1784 | * idleslope, the value to be written on the register | |
1785 | * would have to be 157.23. If rounded down, you end | |
1786 | * up with less bandwidth available than originally | |
1787 | * required (~2572 kbps). If rounded up, you end up | |
1788 | * with a higher bandwidth (~2589 kbps). Below the | |
1789 | * approach we take is to always round up the | |
1790 | * calculated value, so the resulting bandwidth might | |
1791 | * be slightly higher for some configurations. | |
05f9d3e1 | 1792 | */ |
91db3642 | 1793 | value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); |
05f9d3e1 AG |
1794 | |
1795 | tqavcc = rd32(E1000_I210_TQAVCC(queue)); | |
1796 | tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; | |
1797 | tqavcc |= value; | |
1798 | wr32(E1000_I210_TQAVCC(queue), tqavcc); | |
1799 | ||
91db3642 JSP |
1800 | wr32(E1000_I210_TQAVHC(queue), |
1801 | 0x80000000 + ring->hicredit * 0x7735); | |
05f9d3e1 | 1802 | } else { |
05f9d3e1 AG |
1803 | |
1804 | /* Set idleSlope to zero. */ | |
1805 | tqavcc = rd32(E1000_I210_TQAVCC(queue)); | |
1806 | tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; | |
1807 | wr32(E1000_I210_TQAVCC(queue), tqavcc); | |
1808 | ||
1809 | /* Set hiCredit to zero. */ | |
1810 | wr32(E1000_I210_TQAVHC(queue), 0); | |
0364a0d0 JSP |
1811 | |
1812 | /* If CBS is not enabled for any queues anymore, then return to | |
1813 | * the default state of Data Transmission Arbitration on | |
1814 | * TQAVCTRL. | |
1815 | */ | |
1816 | if (!is_any_cbs_enabled(adapter)) { | |
1817 | tqavctrl = rd32(E1000_I210_TQAVCTRL); | |
1818 | tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; | |
1819 | wr32(E1000_I210_TQAVCTRL, tqavctrl); | |
1820 | } | |
05f9d3e1 AG |
1821 | } |
1822 | ||
3048cf84 JSP |
1823 | /* If LaunchTime is enabled, set DataTranTIM. */ |
1824 | if (ring->launchtime_enable) { | |
1825 | /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled | |
1826 | * for any of the SR queues, and configure fetchtime delta. | |
1827 | * XXX NOTE: | |
1828 | * - LaunchTime will be enabled for all SR queues. | |
1829 | * - A fixed offset can be added relative to the launch | |
1830 | * time of all packets if configured at reg LAUNCH_OS0. | |
1831 | * We are keeping it as 0 for now (default value). | |
1832 | */ | |
1833 | tqavctrl = rd32(E1000_I210_TQAVCTRL); | |
1834 | tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | | |
1835 | E1000_TQAVCTRL_FETCHTIME_DELTA; | |
1836 | wr32(E1000_I210_TQAVCTRL, tqavctrl); | |
1837 | } else { | |
1838 | /* If Launchtime is not enabled for any SR queues anymore, | |
1839 | * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, | |
1840 | * effectively disabling Launchtime. | |
1841 | */ | |
1842 | if (!is_any_txtime_enabled(adapter)) { | |
1843 | tqavctrl = rd32(E1000_I210_TQAVCTRL); | |
1844 | tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; | |
1845 | tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; | |
1846 | wr32(E1000_I210_TQAVCTRL, tqavctrl); | |
1847 | } | |
1848 | } | |
1849 | ||
05f9d3e1 AG |
1850 | /* XXX: In i210 controller the sendSlope and loCredit parameters from |
1851 | * CBS are not configurable by software so we don't do any 'controller | |
1852 | * configuration' in respect to these parameters. | |
1853 | */ | |
1854 | ||
4df3c543 JP |
1855 | netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", |
1856 | ring->cbs_enable ? "enabled" : "disabled", | |
1857 | ring->launchtime_enable ? "enabled" : "disabled", | |
1858 | queue, | |
1859 | ring->idleslope, ring->sendslope, | |
1860 | ring->hicredit, ring->locredit); | |
05f9d3e1 AG |
1861 | } |
1862 | ||
3048cf84 JSP |
1863 | static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, |
1864 | bool enable) | |
1865 | { | |
1866 | struct igb_ring *ring; | |
1867 | ||
1868 | if (queue < 0 || queue > adapter->num_tx_queues) | |
1869 | return -EINVAL; | |
1870 | ||
1871 | ring = adapter->tx_ring[queue]; | |
1872 | ring->launchtime_enable = enable; | |
1873 | ||
1874 | return 0; | |
1875 | } | |
1876 | ||
05f9d3e1 AG |
1877 | static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, |
1878 | bool enable, int idleslope, int sendslope, | |
1879 | int hicredit, int locredit) | |
1880 | { | |
1881 | struct igb_ring *ring; | |
1882 | ||
1883 | if (queue < 0 || queue > adapter->num_tx_queues) | |
1884 | return -EINVAL; | |
1885 | ||
1886 | ring = adapter->tx_ring[queue]; | |
1887 | ||
1888 | ring->cbs_enable = enable; | |
1889 | ring->idleslope = idleslope; | |
1890 | ring->sendslope = sendslope; | |
1891 | ring->hicredit = hicredit; | |
1892 | ring->locredit = locredit; | |
1893 | ||
1894 | return 0; | |
1895 | } | |
1896 | ||
91db3642 JSP |
1897 | /** |
1898 | * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable | |
1899 | * @adapter: pointer to adapter struct | |
1900 | * | |
1901 | * Configure TQAVCTRL register switching the controller's Tx mode | |
1902 | * if FQTSS mode is enabled or disabled. Additionally, will issue | |
1903 | * a call to igb_config_tx_modes() per queue so any previously saved | |
1904 | * Tx parameters are applied. | |
1905 | **/ | |
05f9d3e1 AG |
1906 | static void igb_setup_tx_mode(struct igb_adapter *adapter) |
1907 | { | |
1908 | struct net_device *netdev = adapter->netdev; | |
1909 | struct e1000_hw *hw = &adapter->hw; | |
1910 | u32 val; | |
1911 | ||
1912 | /* Only i210 controller supports changing the transmission mode. */ | |
1913 | if (hw->mac.type != e1000_i210) | |
1914 | return; | |
1915 | ||
1916 | if (is_fqtss_enabled(adapter)) { | |
1917 | int i, max_queue; | |
1918 | ||
1919 | /* Configure TQAVCTRL register: set transmit mode to 'Qav', | |
3048cf84 JSP |
1920 | * set data fetch arbitration to 'round robin', set SP_WAIT_SR |
1921 | * so SP queues wait for SR ones. | |
05f9d3e1 AG |
1922 | */ |
1923 | val = rd32(E1000_I210_TQAVCTRL); | |
3048cf84 | 1924 | val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; |
05f9d3e1 AG |
1925 | val &= ~E1000_TQAVCTRL_DATAFETCHARB; |
1926 | wr32(E1000_I210_TQAVCTRL, val); | |
1927 | ||
1928 | /* Configure Tx and Rx packet buffers sizes as described in | |
1929 | * i210 datasheet section 7.2.7.7. | |
1930 | */ | |
1931 | val = rd32(E1000_TXPBS); | |
1932 | val &= ~I210_TXPBSIZE_MASK; | |
1933 | val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | | |
1934 | I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; | |
1935 | wr32(E1000_TXPBS, val); | |
1936 | ||
1937 | val = rd32(E1000_RXPBS); | |
1938 | val &= ~I210_RXPBSIZE_MASK; | |
6f9ae175 | 1939 | val |= I210_RXPBSIZE_PB_30KB; |
05f9d3e1 AG |
1940 | wr32(E1000_RXPBS, val); |
1941 | ||
1942 | /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ | |
1943 | * register should not exceed the buffer size programmed in | |
1944 | * TXPBS. The smallest buffer size programmed in TXPBS is 4kB | |
1945 | * so according to the datasheet we should set MAX_TPKT_SIZE to | |
1946 | * 4kB / 64. | |
1947 | * | |
1948 | * However, when we do so, no frame from queue 2 and 3 are | |
1949 | * transmitted. It seems the MAX_TPKT_SIZE should not be great | |
1950 | * or _equal_ to the buffer size programmed in TXPBS. For this | |
1951 | * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. | |
1952 | */ | |
1953 | val = (4096 - 1) / 64; | |
1954 | wr32(E1000_I210_DTXMXPKTSZ, val); | |
1955 | ||
1956 | /* Since FQTSS mode is enabled, apply any CBS configuration | |
1957 | * previously set. If no previous CBS configuration has been | |
1958 | * done, then the initial configuration is applied, which means | |
1959 | * CBS is disabled. | |
1960 | */ | |
1961 | max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? | |
1962 | adapter->num_tx_queues : I210_SR_QUEUES_NUM; | |
1963 | ||
1964 | for (i = 0; i < max_queue; i++) { | |
91db3642 | 1965 | igb_config_tx_modes(adapter, i); |
05f9d3e1 AG |
1966 | } |
1967 | } else { | |
1968 | wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); | |
1969 | wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); | |
1970 | wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); | |
1971 | ||
1972 | val = rd32(E1000_I210_TQAVCTRL); | |
1973 | /* According to Section 8.12.21, the other flags we've set when | |
1974 | * enabling FQTSS are not relevant when disabling FQTSS so we | |
1975 | * don't set they here. | |
1976 | */ | |
1977 | val &= ~E1000_TQAVCTRL_XMIT_MODE; | |
1978 | wr32(E1000_I210_TQAVCTRL, val); | |
1979 | } | |
1980 | ||
1981 | netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? | |
1982 | "enabled" : "disabled"); | |
1983 | } | |
1984 | ||
9d5c8243 | 1985 | /** |
b980ac18 JK |
1986 | * igb_configure - configure the hardware for RX and TX |
1987 | * @adapter: private board structure | |
9d5c8243 AK |
1988 | **/ |
1989 | static void igb_configure(struct igb_adapter *adapter) | |
1990 | { | |
1991 | struct net_device *netdev = adapter->netdev; | |
1992 | int i; | |
1993 | ||
1994 | igb_get_hw_control(adapter); | |
ff41f8dc | 1995 | igb_set_rx_mode(netdev); |
05f9d3e1 | 1996 | igb_setup_tx_mode(adapter); |
9d5c8243 AK |
1997 | |
1998 | igb_restore_vlan(adapter); | |
9d5c8243 | 1999 | |
85b430b4 | 2000 | igb_setup_tctl(adapter); |
06cf2666 | 2001 | igb_setup_mrqc(adapter); |
9d5c8243 | 2002 | igb_setup_rctl(adapter); |
85b430b4 | 2003 | |
0e71def2 | 2004 | igb_nfc_filter_restore(adapter); |
85b430b4 | 2005 | igb_configure_tx(adapter); |
9d5c8243 | 2006 | igb_configure_rx(adapter); |
662d7205 AD |
2007 | |
2008 | igb_rx_fifo_flush_82575(&adapter->hw); | |
2009 | ||
c493ea45 | 2010 | /* call igb_desc_unused which always leaves |
9d5c8243 | 2011 | * at least 1 descriptor unused to make sure |
b980ac18 JK |
2012 | * next_to_use != next_to_clean |
2013 | */ | |
9d5c8243 | 2014 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3025a446 | 2015 | struct igb_ring *ring = adapter->rx_ring[i]; |
cd392f5c | 2016 | igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); |
9d5c8243 | 2017 | } |
9d5c8243 AK |
2018 | } |
2019 | ||
88a268c1 | 2020 | /** |
b980ac18 JK |
2021 | * igb_power_up_link - Power up the phy/serdes link |
2022 | * @adapter: address of board private structure | |
88a268c1 NN |
2023 | **/ |
2024 | void igb_power_up_link(struct igb_adapter *adapter) | |
2025 | { | |
76886596 AA |
2026 | igb_reset_phy(&adapter->hw); |
2027 | ||
88a268c1 NN |
2028 | if (adapter->hw.phy.media_type == e1000_media_type_copper) |
2029 | igb_power_up_phy_copper(&adapter->hw); | |
2030 | else | |
2031 | igb_power_up_serdes_link_82575(&adapter->hw); | |
aec653c4 TF |
2032 | |
2033 | igb_setup_link(&adapter->hw); | |
88a268c1 NN |
2034 | } |
2035 | ||
2036 | /** | |
b980ac18 JK |
2037 | * igb_power_down_link - Power down the phy/serdes link |
2038 | * @adapter: address of board private structure | |
88a268c1 NN |
2039 | */ |
2040 | static void igb_power_down_link(struct igb_adapter *adapter) | |
2041 | { | |
2042 | if (adapter->hw.phy.media_type == e1000_media_type_copper) | |
2043 | igb_power_down_phy_copper_82575(&adapter->hw); | |
2044 | else | |
2045 | igb_shutdown_serdes_link_82575(&adapter->hw); | |
2046 | } | |
9d5c8243 | 2047 | |
56cec249 CW |
2048 | /** |
2049 | * Detect and switch function for Media Auto Sense | |
2050 | * @adapter: address of the board private structure | |
2051 | **/ | |
2052 | static void igb_check_swap_media(struct igb_adapter *adapter) | |
2053 | { | |
2054 | struct e1000_hw *hw = &adapter->hw; | |
2055 | u32 ctrl_ext, connsw; | |
2056 | bool swap_now = false; | |
2057 | ||
2058 | ctrl_ext = rd32(E1000_CTRL_EXT); | |
2059 | connsw = rd32(E1000_CONNSW); | |
2060 | ||
2061 | /* need to live swap if current media is copper and we have fiber/serdes | |
2062 | * to go to. | |
2063 | */ | |
2064 | ||
2065 | if ((hw->phy.media_type == e1000_media_type_copper) && | |
2066 | (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { | |
2067 | swap_now = true; | |
8d5cfd7f MR |
2068 | } else if ((hw->phy.media_type != e1000_media_type_copper) && |
2069 | !(connsw & E1000_CONNSW_SERDESD)) { | |
56cec249 CW |
2070 | /* copper signal takes time to appear */ |
2071 | if (adapter->copper_tries < 4) { | |
2072 | adapter->copper_tries++; | |
2073 | connsw |= E1000_CONNSW_AUTOSENSE_CONF; | |
2074 | wr32(E1000_CONNSW, connsw); | |
2075 | return; | |
2076 | } else { | |
2077 | adapter->copper_tries = 0; | |
2078 | if ((connsw & E1000_CONNSW_PHYSD) && | |
2079 | (!(connsw & E1000_CONNSW_PHY_PDN))) { | |
2080 | swap_now = true; | |
2081 | connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; | |
2082 | wr32(E1000_CONNSW, connsw); | |
2083 | } | |
2084 | } | |
2085 | } | |
2086 | ||
2087 | if (!swap_now) | |
2088 | return; | |
2089 | ||
2090 | switch (hw->phy.media_type) { | |
2091 | case e1000_media_type_copper: | |
2092 | netdev_info(adapter->netdev, | |
2093 | "MAS: changing media to fiber/serdes\n"); | |
2094 | ctrl_ext |= | |
2095 | E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; | |
2096 | adapter->flags |= IGB_FLAG_MEDIA_RESET; | |
2097 | adapter->copper_tries = 0; | |
2098 | break; | |
2099 | case e1000_media_type_internal_serdes: | |
2100 | case e1000_media_type_fiber: | |
2101 | netdev_info(adapter->netdev, | |
2102 | "MAS: changing media to copper\n"); | |
2103 | ctrl_ext &= | |
2104 | ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; | |
2105 | adapter->flags |= IGB_FLAG_MEDIA_RESET; | |
2106 | break; | |
2107 | default: | |
2108 | /* shouldn't get here during regular operation */ | |
2109 | netdev_err(adapter->netdev, | |
2110 | "AMS: Invalid media type found, returning\n"); | |
2111 | break; | |
2112 | } | |
2113 | wr32(E1000_CTRL_EXT, ctrl_ext); | |
2114 | } | |
2115 | ||
9d5c8243 | 2116 | /** |
b980ac18 JK |
2117 | * igb_up - Open the interface and prepare it to handle traffic |
2118 | * @adapter: board private structure | |
9d5c8243 | 2119 | **/ |
9d5c8243 AK |
2120 | int igb_up(struct igb_adapter *adapter) |
2121 | { | |
2122 | struct e1000_hw *hw = &adapter->hw; | |
2123 | int i; | |
2124 | ||
2125 | /* hardware has been reset, we need to reload some things */ | |
2126 | igb_configure(adapter); | |
2127 | ||
2128 | clear_bit(__IGB_DOWN, &adapter->state); | |
2129 | ||
0d1ae7f4 AD |
2130 | for (i = 0; i < adapter->num_q_vectors; i++) |
2131 | napi_enable(&(adapter->q_vector[i]->napi)); | |
2132 | ||
cd14ef54 | 2133 | if (adapter->flags & IGB_FLAG_HAS_MSIX) |
9d5c8243 | 2134 | igb_configure_msix(adapter); |
feeb2721 AD |
2135 | else |
2136 | igb_assign_vector(adapter->q_vector[0], 0); | |
9d5c8243 AK |
2137 | |
2138 | /* Clear any pending interrupts. */ | |
1ec2297c | 2139 | rd32(E1000_TSICR); |
9d5c8243 AK |
2140 | rd32(E1000_ICR); |
2141 | igb_irq_enable(adapter); | |
2142 | ||
d4960307 AD |
2143 | /* notify VFs that reset has been completed */ |
2144 | if (adapter->vfs_allocated_count) { | |
2145 | u32 reg_data = rd32(E1000_CTRL_EXT); | |
9005df38 | 2146 | |
d4960307 AD |
2147 | reg_data |= E1000_CTRL_EXT_PFRSTD; |
2148 | wr32(E1000_CTRL_EXT, reg_data); | |
2149 | } | |
2150 | ||
4cb9be7a JB |
2151 | netif_tx_start_all_queues(adapter->netdev); |
2152 | ||
25568a53 AD |
2153 | /* start the watchdog. */ |
2154 | hw->mac.get_link_status = 1; | |
2155 | schedule_work(&adapter->watchdog_task); | |
2156 | ||
f4c01e96 CW |
2157 | if ((adapter->flags & IGB_FLAG_EEE) && |
2158 | (!hw->dev_spec._82575.eee_disable)) | |
2159 | adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; | |
2160 | ||
9d5c8243 AK |
2161 | return 0; |
2162 | } | |
2163 | ||
2164 | void igb_down(struct igb_adapter *adapter) | |
2165 | { | |
9d5c8243 | 2166 | struct net_device *netdev = adapter->netdev; |
330a6d6a | 2167 | struct e1000_hw *hw = &adapter->hw; |
9d5c8243 AK |
2168 | u32 tctl, rctl; |
2169 | int i; | |
2170 | ||
2171 | /* signal that we're down so the interrupt handler does not | |
b980ac18 JK |
2172 | * reschedule our watchdog timer |
2173 | */ | |
9d5c8243 AK |
2174 | set_bit(__IGB_DOWN, &adapter->state); |
2175 | ||
2176 | /* disable receives in the hardware */ | |
2177 | rctl = rd32(E1000_RCTL); | |
2178 | wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); | |
2179 | /* flush and sleep below */ | |
2180 | ||
94221ae7 GH |
2181 | igb_nfc_filter_exit(adapter); |
2182 | ||
f28ea083 | 2183 | netif_carrier_off(netdev); |
fd2ea0a7 | 2184 | netif_tx_stop_all_queues(netdev); |
9d5c8243 AK |
2185 | |
2186 | /* disable transmits in the hardware */ | |
2187 | tctl = rd32(E1000_TCTL); | |
2188 | tctl &= ~E1000_TCTL_EN; | |
2189 | wr32(E1000_TCTL, tctl); | |
2190 | /* flush both disables and wait for them to finish */ | |
2191 | wrfl(); | |
0d451e79 | 2192 | usleep_range(10000, 11000); |
9d5c8243 | 2193 | |
41f149a2 CW |
2194 | igb_irq_disable(adapter); |
2195 | ||
aa9b8cc4 AA |
2196 | adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; |
2197 | ||
41f149a2 | 2198 | for (i = 0; i < adapter->num_q_vectors; i++) { |
17a402a0 CW |
2199 | if (adapter->q_vector[i]) { |
2200 | napi_synchronize(&adapter->q_vector[i]->napi); | |
2201 | napi_disable(&adapter->q_vector[i]->napi); | |
2202 | } | |
41f149a2 | 2203 | } |
9d5c8243 | 2204 | |
9d5c8243 AK |
2205 | del_timer_sync(&adapter->watchdog_timer); |
2206 | del_timer_sync(&adapter->phy_info_timer); | |
2207 | ||
04fe6358 | 2208 | /* record the stats before reset*/ |
5642e27b | 2209 | spin_lock(&adapter->stats64_lock); |
81e3f64a | 2210 | igb_update_stats(adapter); |
5642e27b | 2211 | spin_unlock(&adapter->stats64_lock); |
04fe6358 | 2212 | |
9d5c8243 AK |
2213 | adapter->link_speed = 0; |
2214 | adapter->link_duplex = 0; | |
2215 | ||
3023682e JK |
2216 | if (!pci_channel_offline(adapter->pdev)) |
2217 | igb_reset(adapter); | |
16903caa AD |
2218 | |
2219 | /* clear VLAN promisc flag so VFTA will be updated if necessary */ | |
2220 | adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; | |
2221 | ||
9d5c8243 AK |
2222 | igb_clean_all_tx_rings(adapter); |
2223 | igb_clean_all_rx_rings(adapter); | |
7e0e99ef AD |
2224 | #ifdef CONFIG_IGB_DCA |
2225 | ||
2226 | /* since we reset the hardware DCA settings were cleared */ | |
2227 | igb_setup_dca(adapter); | |
2228 | #endif | |
9d5c8243 AK |
2229 | } |
2230 | ||
2231 | void igb_reinit_locked(struct igb_adapter *adapter) | |
2232 | { | |
2233 | WARN_ON(in_interrupt()); | |
2234 | while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) | |
0d451e79 | 2235 | usleep_range(1000, 2000); |
9d5c8243 AK |
2236 | igb_down(adapter); |
2237 | igb_up(adapter); | |
2238 | clear_bit(__IGB_RESETTING, &adapter->state); | |
2239 | } | |
2240 | ||
56cec249 CW |
2241 | /** igb_enable_mas - Media Autosense re-enable after swap |
2242 | * | |
2243 | * @adapter: adapter struct | |
2244 | **/ | |
8cfb879d | 2245 | static void igb_enable_mas(struct igb_adapter *adapter) |
56cec249 CW |
2246 | { |
2247 | struct e1000_hw *hw = &adapter->hw; | |
8cfb879d | 2248 | u32 connsw = rd32(E1000_CONNSW); |
56cec249 CW |
2249 | |
2250 | /* configure for SerDes media detect */ | |
8cfb879d TF |
2251 | if ((hw->phy.media_type == e1000_media_type_copper) && |
2252 | (!(connsw & E1000_CONNSW_SERDESD))) { | |
56cec249 CW |
2253 | connsw |= E1000_CONNSW_ENRGSRC; |
2254 | connsw |= E1000_CONNSW_AUTOSENSE_EN; | |
2255 | wr32(E1000_CONNSW, connsw); | |
2256 | wrfl(); | |
56cec249 | 2257 | } |
56cec249 CW |
2258 | } |
2259 | ||
9d5c8243 AK |
2260 | void igb_reset(struct igb_adapter *adapter) |
2261 | { | |
090b1795 | 2262 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 | 2263 | struct e1000_hw *hw = &adapter->hw; |
2d064c06 AD |
2264 | struct e1000_mac_info *mac = &hw->mac; |
2265 | struct e1000_fc_info *fc = &hw->fc; | |
45693bcb | 2266 | u32 pba, hwm; |
9d5c8243 AK |
2267 | |
2268 | /* Repartition Pba for greater than 9k mtu | |
2269 | * To take effect CTRL.RST is required. | |
2270 | */ | |
fa4dfae0 | 2271 | switch (mac->type) { |
d2ba2ed8 | 2272 | case e1000_i350: |
ceb5f13b | 2273 | case e1000_i354: |
55cac248 AD |
2274 | case e1000_82580: |
2275 | pba = rd32(E1000_RXPBS); | |
2276 | pba = igb_rxpbs_adjust_82580(pba); | |
2277 | break; | |
fa4dfae0 | 2278 | case e1000_82576: |
d249be54 AD |
2279 | pba = rd32(E1000_RXPBS); |
2280 | pba &= E1000_RXPBS_SIZE_MASK_82576; | |
fa4dfae0 AD |
2281 | break; |
2282 | case e1000_82575: | |
f96a8a0b CW |
2283 | case e1000_i210: |
2284 | case e1000_i211: | |
fa4dfae0 AD |
2285 | default: |
2286 | pba = E1000_PBA_34K; | |
2287 | break; | |
2d064c06 | 2288 | } |
9d5c8243 | 2289 | |
45693bcb AD |
2290 | if (mac->type == e1000_82575) { |
2291 | u32 min_rx_space, min_tx_space, needed_tx_space; | |
2292 | ||
2293 | /* write Rx PBA so that hardware can report correct Tx PBA */ | |
9d5c8243 AK |
2294 | wr32(E1000_PBA, pba); |
2295 | ||
2296 | /* To maintain wire speed transmits, the Tx FIFO should be | |
2297 | * large enough to accommodate two full transmit packets, | |
2298 | * rounded up to the next 1KB and expressed in KB. Likewise, | |
2299 | * the Rx FIFO should be large enough to accommodate at least | |
2300 | * one full receive packet and is similarly rounded up and | |
b980ac18 JK |
2301 | * expressed in KB. |
2302 | */ | |
45693bcb AD |
2303 | min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); |
2304 | ||
2305 | /* The Tx FIFO also stores 16 bytes of information about the Tx | |
2306 | * but don't include Ethernet FCS because hardware appends it. | |
2307 | * We only need to round down to the nearest 512 byte block | |
2308 | * count since the value we care about is 2 frames, not 1. | |
b980ac18 | 2309 | */ |
45693bcb AD |
2310 | min_tx_space = adapter->max_frame_size; |
2311 | min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; | |
2312 | min_tx_space = DIV_ROUND_UP(min_tx_space, 512); | |
2313 | ||
2314 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | |
2315 | needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); | |
9d5c8243 AK |
2316 | |
2317 | /* If current Tx allocation is less than the min Tx FIFO size, | |
2318 | * and the min Tx FIFO size is less than the current Rx FIFO | |
45693bcb | 2319 | * allocation, take space away from current Rx allocation. |
b980ac18 | 2320 | */ |
45693bcb AD |
2321 | if (needed_tx_space < pba) { |
2322 | pba -= needed_tx_space; | |
9d5c8243 | 2323 | |
b980ac18 JK |
2324 | /* if short on Rx space, Rx wins and must trump Tx |
2325 | * adjustment | |
2326 | */ | |
9d5c8243 AK |
2327 | if (pba < min_rx_space) |
2328 | pba = min_rx_space; | |
2329 | } | |
45693bcb AD |
2330 | |
2331 | /* adjust PBA for jumbo frames */ | |
2d064c06 | 2332 | wr32(E1000_PBA, pba); |
9d5c8243 | 2333 | } |
9d5c8243 | 2334 | |
45693bcb AD |
2335 | /* flow control settings |
2336 | * The high water mark must be low enough to fit one full frame | |
2337 | * after transmitting the pause frame. As such we must have enough | |
2338 | * space to allow for us to complete our current transmit and then | |
2339 | * receive the frame that is in progress from the link partner. | |
2340 | * Set it to: | |
2341 | * - the full Rx FIFO size minus one full Tx plus one full Rx frame | |
b980ac18 | 2342 | */ |
45693bcb | 2343 | hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); |
9d5c8243 | 2344 | |
d48507fe | 2345 | fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ |
d405ea3e | 2346 | fc->low_water = fc->high_water - 16; |
9d5c8243 AK |
2347 | fc->pause_time = 0xFFFF; |
2348 | fc->send_xon = 1; | |
0cce119a | 2349 | fc->current_mode = fc->requested_mode; |
9d5c8243 | 2350 | |
4ae196df AD |
2351 | /* disable receive for all VFs and wait one second */ |
2352 | if (adapter->vfs_allocated_count) { | |
2353 | int i; | |
9005df38 | 2354 | |
4ae196df | 2355 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) |
8fa7e0f7 | 2356 | adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; |
4ae196df AD |
2357 | |
2358 | /* ping all the active vfs to let them know we are going down */ | |
f2ca0dbe | 2359 | igb_ping_all_vfs(adapter); |
4ae196df AD |
2360 | |
2361 | /* disable transmits and receives */ | |
2362 | wr32(E1000_VFRE, 0); | |
2363 | wr32(E1000_VFTE, 0); | |
2364 | } | |
2365 | ||
9d5c8243 | 2366 | /* Allow time for pending master requests to run */ |
330a6d6a | 2367 | hw->mac.ops.reset_hw(hw); |
9d5c8243 AK |
2368 | wr32(E1000_WUC, 0); |
2369 | ||
56cec249 CW |
2370 | if (adapter->flags & IGB_FLAG_MEDIA_RESET) { |
2371 | /* need to resetup here after media swap */ | |
2372 | adapter->ei.get_invariants(hw); | |
2373 | adapter->flags &= ~IGB_FLAG_MEDIA_RESET; | |
2374 | } | |
fb2308ba | 2375 | if ((mac->type == e1000_82575 || mac->type == e1000_i350) && |
8cfb879d TF |
2376 | (adapter->flags & IGB_FLAG_MAS_ENABLE)) { |
2377 | igb_enable_mas(adapter); | |
56cec249 | 2378 | } |
330a6d6a | 2379 | if (hw->mac.ops.init_hw(hw)) |
090b1795 | 2380 | dev_err(&pdev->dev, "Hardware Error\n"); |
831ec0b4 | 2381 | |
83c21335 YK |
2382 | /* RAR registers were cleared during init_hw, clear mac table */ |
2383 | igb_flush_mac_table(adapter); | |
2384 | __dev_uc_unsync(adapter->netdev, NULL); | |
2385 | ||
2386 | /* Recover default RAR entry */ | |
2387 | igb_set_default_mac_filter(adapter); | |
2388 | ||
b980ac18 | 2389 | /* Flow control settings reset on hardware reset, so guarantee flow |
a27416bb MV |
2390 | * control is off when forcing speed. |
2391 | */ | |
2392 | if (!hw->mac.autoneg) | |
2393 | igb_force_mac_fc(hw); | |
2394 | ||
b6e0c419 | 2395 | igb_init_dmac(adapter, pba); |
e428893b CW |
2396 | #ifdef CONFIG_IGB_HWMON |
2397 | /* Re-initialize the thermal sensor on i350 devices. */ | |
2398 | if (!test_bit(__IGB_DOWN, &adapter->state)) { | |
2399 | if (mac->type == e1000_i350 && hw->bus.func == 0) { | |
2400 | /* If present, re-initialize the external thermal sensor | |
2401 | * interface. | |
2402 | */ | |
2403 | if (adapter->ets) | |
2404 | mac->ops.init_thermal_sensor_thresh(hw); | |
2405 | } | |
2406 | } | |
2407 | #endif | |
b936136d | 2408 | /* Re-establish EEE setting */ |
f4c01e96 CW |
2409 | if (hw->phy.media_type == e1000_media_type_copper) { |
2410 | switch (mac->type) { | |
2411 | case e1000_i350: | |
2412 | case e1000_i210: | |
2413 | case e1000_i211: | |
c4c112f1 | 2414 | igb_set_eee_i350(hw, true, true); |
f4c01e96 CW |
2415 | break; |
2416 | case e1000_i354: | |
c4c112f1 | 2417 | igb_set_eee_i354(hw, true, true); |
f4c01e96 CW |
2418 | break; |
2419 | default: | |
2420 | break; | |
2421 | } | |
2422 | } | |
88a268c1 NN |
2423 | if (!netif_running(adapter->netdev)) |
2424 | igb_power_down_link(adapter); | |
2425 | ||
9d5c8243 AK |
2426 | igb_update_mng_vlan(adapter); |
2427 | ||
2428 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | |
2429 | wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); | |
2430 | ||
1f6e8178 | 2431 | /* Re-enable PTP, where applicable. */ |
4f3ce71b JK |
2432 | if (adapter->ptp_flags & IGB_PTP_ENABLED) |
2433 | igb_ptp_reset(adapter); | |
1f6e8178 | 2434 | |
330a6d6a | 2435 | igb_get_phy_info(hw); |
9d5c8243 AK |
2436 | } |
2437 | ||
c8f44aff MM |
2438 | static netdev_features_t igb_fix_features(struct net_device *netdev, |
2439 | netdev_features_t features) | |
b2cb09b1 | 2440 | { |
b980ac18 JK |
2441 | /* Since there is no support for separate Rx/Tx vlan accel |
2442 | * enable/disable make sure Tx flag is always in same state as Rx. | |
b2cb09b1 | 2443 | */ |
f646968f PM |
2444 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
2445 | features |= NETIF_F_HW_VLAN_CTAG_TX; | |
b2cb09b1 | 2446 | else |
f646968f | 2447 | features &= ~NETIF_F_HW_VLAN_CTAG_TX; |
b2cb09b1 JP |
2448 | |
2449 | return features; | |
2450 | } | |
2451 | ||
c8f44aff MM |
2452 | static int igb_set_features(struct net_device *netdev, |
2453 | netdev_features_t features) | |
ac52caa3 | 2454 | { |
c8f44aff | 2455 | netdev_features_t changed = netdev->features ^ features; |
89eaefb6 | 2456 | struct igb_adapter *adapter = netdev_priv(netdev); |
ac52caa3 | 2457 | |
f646968f | 2458 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) |
b2cb09b1 JP |
2459 | igb_vlan_mode(netdev, features); |
2460 | ||
16903caa | 2461 | if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) |
89eaefb6 BG |
2462 | return 0; |
2463 | ||
0e71def2 GH |
2464 | if (!(features & NETIF_F_NTUPLE)) { |
2465 | struct hlist_node *node2; | |
2466 | struct igb_nfc_filter *rule; | |
2467 | ||
2468 | spin_lock(&adapter->nfc_lock); | |
2469 | hlist_for_each_entry_safe(rule, node2, | |
2470 | &adapter->nfc_filter_list, nfc_node) { | |
2471 | igb_erase_filter(adapter, rule); | |
2472 | hlist_del(&rule->nfc_node); | |
2473 | kfree(rule); | |
2474 | } | |
2475 | spin_unlock(&adapter->nfc_lock); | |
2476 | adapter->nfc_filter_count = 0; | |
2477 | } | |
2478 | ||
89eaefb6 BG |
2479 | netdev->features = features; |
2480 | ||
2481 | if (netif_running(netdev)) | |
2482 | igb_reinit_locked(adapter); | |
2483 | else | |
2484 | igb_reset(adapter); | |
2485 | ||
b0ddfe2b | 2486 | return 1; |
ac52caa3 MM |
2487 | } |
2488 | ||
268f9d33 AD |
2489 | static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
2490 | struct net_device *dev, | |
2491 | const unsigned char *addr, u16 vid, | |
87b0984e PM |
2492 | u16 flags, |
2493 | struct netlink_ext_ack *extack) | |
268f9d33 AD |
2494 | { |
2495 | /* guarantee we can provide a unique filter for the unicast address */ | |
2496 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { | |
2497 | struct igb_adapter *adapter = netdev_priv(dev); | |
268f9d33 | 2498 | int vfn = adapter->vfs_allocated_count; |
268f9d33 | 2499 | |
83c21335 | 2500 | if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) |
268f9d33 AD |
2501 | return -ENOMEM; |
2502 | } | |
2503 | ||
2504 | return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); | |
2505 | } | |
2506 | ||
e10715d3 AD |
2507 | #define IGB_MAX_MAC_HDR_LEN 127 |
2508 | #define IGB_MAX_NETWORK_HDR_LEN 511 | |
2509 | ||
2510 | static netdev_features_t | |
2511 | igb_features_check(struct sk_buff *skb, struct net_device *dev, | |
2512 | netdev_features_t features) | |
2513 | { | |
2514 | unsigned int network_hdr_len, mac_hdr_len; | |
2515 | ||
2516 | /* Make certain the headers can be described by a context descriptor */ | |
2517 | mac_hdr_len = skb_network_header(skb) - skb->data; | |
2518 | if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) | |
2519 | return features & ~(NETIF_F_HW_CSUM | | |
2520 | NETIF_F_SCTP_CRC | | |
4085d06d | 2521 | NETIF_F_GSO_UDP_L4 | |
e10715d3 AD |
2522 | NETIF_F_HW_VLAN_CTAG_TX | |
2523 | NETIF_F_TSO | | |
2524 | NETIF_F_TSO6); | |
2525 | ||
2526 | network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); | |
2527 | if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) | |
2528 | return features & ~(NETIF_F_HW_CSUM | | |
2529 | NETIF_F_SCTP_CRC | | |
4085d06d | 2530 | NETIF_F_GSO_UDP_L4 | |
e10715d3 AD |
2531 | NETIF_F_TSO | |
2532 | NETIF_F_TSO6); | |
2533 | ||
2534 | /* We can only support IPV4 TSO in tunnels if we can mangle the | |
2535 | * inner IP ID field, so strip TSO if MANGLEID is not supported. | |
2536 | */ | |
2537 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) | |
2538 | features &= ~NETIF_F_TSO; | |
2539 | ||
2540 | return features; | |
2541 | } | |
2542 | ||
8080e6ab JSP |
2543 | static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) |
2544 | { | |
2545 | if (!is_fqtss_enabled(adapter)) { | |
2546 | enable_fqtss(adapter, true); | |
2547 | return; | |
2548 | } | |
2549 | ||
2550 | igb_config_tx_modes(adapter, queue); | |
2551 | ||
3048cf84 | 2552 | if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) |
8080e6ab JSP |
2553 | enable_fqtss(adapter, false); |
2554 | } | |
2555 | ||
05f9d3e1 AG |
2556 | static int igb_offload_cbs(struct igb_adapter *adapter, |
2557 | struct tc_cbs_qopt_offload *qopt) | |
2558 | { | |
2559 | struct e1000_hw *hw = &adapter->hw; | |
2560 | int err; | |
2561 | ||
2562 | /* CBS offloading is only supported by i210 controller. */ | |
2563 | if (hw->mac.type != e1000_i210) | |
2564 | return -EOPNOTSUPP; | |
2565 | ||
2566 | /* CBS offloading is only supported by queue 0 and queue 1. */ | |
2567 | if (qopt->queue < 0 || qopt->queue > 1) | |
2568 | return -EINVAL; | |
2569 | ||
2570 | err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, | |
2571 | qopt->idleslope, qopt->sendslope, | |
2572 | qopt->hicredit, qopt->locredit); | |
2573 | if (err) | |
2574 | return err; | |
2575 | ||
8080e6ab | 2576 | igb_offload_apply(adapter, qopt->queue); |
05f9d3e1 AG |
2577 | |
2578 | return 0; | |
2579 | } | |
2580 | ||
e086be9a VCG |
2581 | #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) |
2582 | #define VLAN_PRIO_FULL_MASK (0x07) | |
2583 | ||
2584 | static int igb_parse_cls_flower(struct igb_adapter *adapter, | |
f9e30088 | 2585 | struct flow_cls_offload *f, |
e086be9a VCG |
2586 | int traffic_class, |
2587 | struct igb_nfc_filter *input) | |
2588 | { | |
f9e30088 | 2589 | struct flow_rule *rule = flow_cls_offload_flow_rule(f); |
8f256622 | 2590 | struct flow_dissector *dissector = rule->match.dissector; |
e086be9a VCG |
2591 | struct netlink_ext_ack *extack = f->common.extack; |
2592 | ||
8f256622 | 2593 | if (dissector->used_keys & |
e086be9a VCG |
2594 | ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | |
2595 | BIT(FLOW_DISSECTOR_KEY_CONTROL) | | |
2596 | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | | |
2597 | BIT(FLOW_DISSECTOR_KEY_VLAN))) { | |
2598 | NL_SET_ERR_MSG_MOD(extack, | |
2599 | "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); | |
2600 | return -EOPNOTSUPP; | |
2601 | } | |
2602 | ||
8f256622 PNA |
2603 | if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
2604 | struct flow_match_eth_addrs match; | |
e086be9a | 2605 | |
8f256622 PNA |
2606 | flow_rule_match_eth_addrs(rule, &match); |
2607 | if (!is_zero_ether_addr(match.mask->dst)) { | |
2608 | if (!is_broadcast_ether_addr(match.mask->dst)) { | |
e086be9a VCG |
2609 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); |
2610 | return -EINVAL; | |
2611 | } | |
2612 | ||
2613 | input->filter.match_flags |= | |
2614 | IGB_FILTER_FLAG_DST_MAC_ADDR; | |
8f256622 | 2615 | ether_addr_copy(input->filter.dst_addr, match.key->dst); |
e086be9a VCG |
2616 | } |
2617 | ||
8f256622 PNA |
2618 | if (!is_zero_ether_addr(match.mask->src)) { |
2619 | if (!is_broadcast_ether_addr(match.mask->src)) { | |
e086be9a VCG |
2620 | NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); |
2621 | return -EINVAL; | |
2622 | } | |
2623 | ||
2624 | input->filter.match_flags |= | |
2625 | IGB_FILTER_FLAG_SRC_MAC_ADDR; | |
8f256622 | 2626 | ether_addr_copy(input->filter.src_addr, match.key->src); |
e086be9a VCG |
2627 | } |
2628 | } | |
2629 | ||
8f256622 PNA |
2630 | if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
2631 | struct flow_match_basic match; | |
e086be9a | 2632 | |
8f256622 PNA |
2633 | flow_rule_match_basic(rule, &match); |
2634 | if (match.mask->n_proto) { | |
2635 | if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { | |
e086be9a VCG |
2636 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); |
2637 | return -EINVAL; | |
2638 | } | |
2639 | ||
2640 | input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; | |
8f256622 | 2641 | input->filter.etype = match.key->n_proto; |
e086be9a VCG |
2642 | } |
2643 | } | |
2644 | ||
8f256622 PNA |
2645 | if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
2646 | struct flow_match_vlan match; | |
e086be9a | 2647 | |
8f256622 PNA |
2648 | flow_rule_match_vlan(rule, &match); |
2649 | if (match.mask->vlan_priority) { | |
2650 | if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { | |
e086be9a VCG |
2651 | NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); |
2652 | return -EINVAL; | |
2653 | } | |
2654 | ||
2655 | input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; | |
8f256622 | 2656 | input->filter.vlan_tci = match.key->vlan_priority; |
e086be9a VCG |
2657 | } |
2658 | } | |
2659 | ||
2660 | input->action = traffic_class; | |
2661 | input->cookie = f->cookie; | |
2662 | ||
2663 | return 0; | |
2664 | } | |
2665 | ||
f8f3d34e | 2666 | static int igb_configure_clsflower(struct igb_adapter *adapter, |
f9e30088 | 2667 | struct flow_cls_offload *cls_flower) |
f8f3d34e | 2668 | { |
e086be9a VCG |
2669 | struct netlink_ext_ack *extack = cls_flower->common.extack; |
2670 | struct igb_nfc_filter *filter, *f; | |
2671 | int err, tc; | |
2672 | ||
2673 | tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); | |
2674 | if (tc < 0) { | |
2675 | NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); | |
2676 | return -EINVAL; | |
2677 | } | |
2678 | ||
2679 | filter = kzalloc(sizeof(*filter), GFP_KERNEL); | |
2680 | if (!filter) | |
2681 | return -ENOMEM; | |
2682 | ||
2683 | err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); | |
2684 | if (err < 0) | |
2685 | goto err_parse; | |
2686 | ||
2687 | spin_lock(&adapter->nfc_lock); | |
2688 | ||
2689 | hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { | |
2690 | if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { | |
2691 | err = -EEXIST; | |
2692 | NL_SET_ERR_MSG_MOD(extack, | |
2693 | "This filter is already set in ethtool"); | |
2694 | goto err_locked; | |
2695 | } | |
2696 | } | |
2697 | ||
2698 | hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { | |
2699 | if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { | |
2700 | err = -EEXIST; | |
2701 | NL_SET_ERR_MSG_MOD(extack, | |
2702 | "This filter is already set in cls_flower"); | |
2703 | goto err_locked; | |
2704 | } | |
2705 | } | |
2706 | ||
2707 | err = igb_add_filter(adapter, filter); | |
2708 | if (err < 0) { | |
2709 | NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); | |
2710 | goto err_locked; | |
2711 | } | |
2712 | ||
2713 | hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); | |
2714 | ||
2715 | spin_unlock(&adapter->nfc_lock); | |
2716 | ||
2717 | return 0; | |
2718 | ||
2719 | err_locked: | |
2720 | spin_unlock(&adapter->nfc_lock); | |
2721 | ||
2722 | err_parse: | |
2723 | kfree(filter); | |
2724 | ||
2725 | return err; | |
f8f3d34e VCG |
2726 | } |
2727 | ||
2728 | static int igb_delete_clsflower(struct igb_adapter *adapter, | |
f9e30088 | 2729 | struct flow_cls_offload *cls_flower) |
f8f3d34e | 2730 | { |
e086be9a VCG |
2731 | struct igb_nfc_filter *filter; |
2732 | int err; | |
2733 | ||
2734 | spin_lock(&adapter->nfc_lock); | |
2735 | ||
2736 | hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) | |
2737 | if (filter->cookie == cls_flower->cookie) | |
2738 | break; | |
2739 | ||
2740 | if (!filter) { | |
2741 | err = -ENOENT; | |
2742 | goto out; | |
2743 | } | |
2744 | ||
2745 | err = igb_erase_filter(adapter, filter); | |
2746 | if (err < 0) | |
2747 | goto out; | |
2748 | ||
2749 | hlist_del(&filter->nfc_node); | |
2750 | kfree(filter); | |
2751 | ||
2752 | out: | |
2753 | spin_unlock(&adapter->nfc_lock); | |
2754 | ||
2755 | return err; | |
f8f3d34e VCG |
2756 | } |
2757 | ||
2758 | static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, | |
f9e30088 | 2759 | struct flow_cls_offload *cls_flower) |
f8f3d34e VCG |
2760 | { |
2761 | switch (cls_flower->command) { | |
f9e30088 | 2762 | case FLOW_CLS_REPLACE: |
f8f3d34e | 2763 | return igb_configure_clsflower(adapter, cls_flower); |
f9e30088 | 2764 | case FLOW_CLS_DESTROY: |
f8f3d34e | 2765 | return igb_delete_clsflower(adapter, cls_flower); |
f9e30088 | 2766 | case FLOW_CLS_STATS: |
f8f3d34e VCG |
2767 | return -EOPNOTSUPP; |
2768 | default: | |
246ab6f0 | 2769 | return -EOPNOTSUPP; |
f8f3d34e VCG |
2770 | } |
2771 | } | |
2772 | ||
2773 | static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, | |
2774 | void *cb_priv) | |
2775 | { | |
2776 | struct igb_adapter *adapter = cb_priv; | |
2777 | ||
2778 | if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) | |
2779 | return -EOPNOTSUPP; | |
2780 | ||
2781 | switch (type) { | |
2782 | case TC_SETUP_CLSFLOWER: | |
2783 | return igb_setup_tc_cls_flower(adapter, type_data); | |
2784 | ||
2785 | default: | |
2786 | return -EOPNOTSUPP; | |
2787 | } | |
2788 | } | |
2789 | ||
3048cf84 JSP |
2790 | static int igb_offload_txtime(struct igb_adapter *adapter, |
2791 | struct tc_etf_qopt_offload *qopt) | |
2792 | { | |
2793 | struct e1000_hw *hw = &adapter->hw; | |
2794 | int err; | |
2795 | ||
2796 | /* Launchtime offloading is only supported by i210 controller. */ | |
2797 | if (hw->mac.type != e1000_i210) | |
2798 | return -EOPNOTSUPP; | |
2799 | ||
2800 | /* Launchtime offloading is only supported by queues 0 and 1. */ | |
2801 | if (qopt->queue < 0 || qopt->queue > 1) | |
2802 | return -EINVAL; | |
2803 | ||
2804 | err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); | |
2805 | if (err) | |
2806 | return err; | |
2807 | ||
2808 | igb_offload_apply(adapter, qopt->queue); | |
2809 | ||
2810 | return 0; | |
2811 | } | |
2812 | ||
955bcb6e PNA |
2813 | static LIST_HEAD(igb_block_cb_list); |
2814 | ||
05f9d3e1 AG |
2815 | static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, |
2816 | void *type_data) | |
2817 | { | |
2818 | struct igb_adapter *adapter = netdev_priv(dev); | |
2819 | ||
2820 | switch (type) { | |
8521db4c | 2821 | case TC_SETUP_QDISC_CBS: |
05f9d3e1 | 2822 | return igb_offload_cbs(adapter, type_data); |
f8f3d34e | 2823 | case TC_SETUP_BLOCK: |
955bcb6e PNA |
2824 | return flow_block_cb_setup_simple(type_data, |
2825 | &igb_block_cb_list, | |
4e95bc26 PNA |
2826 | igb_setup_tc_block_cb, |
2827 | adapter, adapter, true); | |
2828 | ||
3048cf84 JSP |
2829 | case TC_SETUP_QDISC_ETF: |
2830 | return igb_offload_txtime(adapter, type_data); | |
05f9d3e1 AG |
2831 | |
2832 | default: | |
2833 | return -EOPNOTSUPP; | |
2834 | } | |
2835 | } | |
2836 | ||
2e5c6922 | 2837 | static const struct net_device_ops igb_netdev_ops = { |
559e9c49 | 2838 | .ndo_open = igb_open, |
2e5c6922 | 2839 | .ndo_stop = igb_close, |
cd392f5c | 2840 | .ndo_start_xmit = igb_xmit_frame, |
12dcd86b | 2841 | .ndo_get_stats64 = igb_get_stats64, |
ff41f8dc | 2842 | .ndo_set_rx_mode = igb_set_rx_mode, |
2e5c6922 SH |
2843 | .ndo_set_mac_address = igb_set_mac, |
2844 | .ndo_change_mtu = igb_change_mtu, | |
2845 | .ndo_do_ioctl = igb_ioctl, | |
2846 | .ndo_tx_timeout = igb_tx_timeout, | |
2847 | .ndo_validate_addr = eth_validate_addr, | |
2e5c6922 SH |
2848 | .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, |
2849 | .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, | |
8151d294 WM |
2850 | .ndo_set_vf_mac = igb_ndo_set_vf_mac, |
2851 | .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, | |
ed616689 | 2852 | .ndo_set_vf_rate = igb_ndo_set_vf_bw, |
70ea4783 | 2853 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, |
1b8b062a | 2854 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, |
8151d294 | 2855 | .ndo_get_vf_config = igb_ndo_get_vf_config, |
b2cb09b1 JP |
2856 | .ndo_fix_features = igb_fix_features, |
2857 | .ndo_set_features = igb_set_features, | |
268f9d33 | 2858 | .ndo_fdb_add = igb_ndo_fdb_add, |
e10715d3 | 2859 | .ndo_features_check = igb_features_check, |
05f9d3e1 | 2860 | .ndo_setup_tc = igb_setup_tc, |
2e5c6922 SH |
2861 | }; |
2862 | ||
d67974f0 CW |
2863 | /** |
2864 | * igb_set_fw_version - Configure version string for ethtool | |
2865 | * @adapter: adapter struct | |
d67974f0 CW |
2866 | **/ |
2867 | void igb_set_fw_version(struct igb_adapter *adapter) | |
2868 | { | |
2869 | struct e1000_hw *hw = &adapter->hw; | |
0b1a6f2e CW |
2870 | struct e1000_fw_version fw; |
2871 | ||
2872 | igb_get_fw_version(hw, &fw); | |
2873 | ||
2874 | switch (hw->mac.type) { | |
7dc98a62 | 2875 | case e1000_i210: |
0b1a6f2e | 2876 | case e1000_i211: |
7dc98a62 CW |
2877 | if (!(igb_get_flash_presence_i210(hw))) { |
2878 | snprintf(adapter->fw_version, | |
2879 | sizeof(adapter->fw_version), | |
2880 | "%2d.%2d-%d", | |
2881 | fw.invm_major, fw.invm_minor, | |
2882 | fw.invm_img_type); | |
2883 | break; | |
2884 | } | |
2885 | /* fall through */ | |
0b1a6f2e CW |
2886 | default: |
2887 | /* if option is rom valid, display its version too */ | |
2888 | if (fw.or_valid) { | |
2889 | snprintf(adapter->fw_version, | |
2890 | sizeof(adapter->fw_version), | |
2891 | "%d.%d, 0x%08x, %d.%d.%d", | |
2892 | fw.eep_major, fw.eep_minor, fw.etrack_id, | |
2893 | fw.or_major, fw.or_build, fw.or_patch); | |
2894 | /* no option rom */ | |
7dc98a62 | 2895 | } else if (fw.etrack_id != 0X0000) { |
0b1a6f2e | 2896 | snprintf(adapter->fw_version, |
7dc98a62 CW |
2897 | sizeof(adapter->fw_version), |
2898 | "%d.%d, 0x%08x", | |
2899 | fw.eep_major, fw.eep_minor, fw.etrack_id); | |
2900 | } else { | |
2901 | snprintf(adapter->fw_version, | |
2902 | sizeof(adapter->fw_version), | |
2903 | "%d.%d.%d", | |
2904 | fw.eep_major, fw.eep_minor, fw.eep_build); | |
0b1a6f2e CW |
2905 | } |
2906 | break; | |
d67974f0 | 2907 | } |
d67974f0 CW |
2908 | } |
2909 | ||
56cec249 CW |
2910 | /** |
2911 | * igb_init_mas - init Media Autosense feature if enabled in the NVM | |
2912 | * | |
2913 | * @adapter: adapter struct | |
2914 | **/ | |
2915 | static void igb_init_mas(struct igb_adapter *adapter) | |
2916 | { | |
2917 | struct e1000_hw *hw = &adapter->hw; | |
2918 | u16 eeprom_data; | |
2919 | ||
2920 | hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); | |
2921 | switch (hw->bus.func) { | |
2922 | case E1000_FUNC_0: | |
2923 | if (eeprom_data & IGB_MAS_ENABLE_0) { | |
2924 | adapter->flags |= IGB_FLAG_MAS_ENABLE; | |
2925 | netdev_info(adapter->netdev, | |
2926 | "MAS: Enabling Media Autosense for port %d\n", | |
2927 | hw->bus.func); | |
2928 | } | |
2929 | break; | |
2930 | case E1000_FUNC_1: | |
2931 | if (eeprom_data & IGB_MAS_ENABLE_1) { | |
2932 | adapter->flags |= IGB_FLAG_MAS_ENABLE; | |
2933 | netdev_info(adapter->netdev, | |
2934 | "MAS: Enabling Media Autosense for port %d\n", | |
2935 | hw->bus.func); | |
2936 | } | |
2937 | break; | |
2938 | case E1000_FUNC_2: | |
2939 | if (eeprom_data & IGB_MAS_ENABLE_2) { | |
2940 | adapter->flags |= IGB_FLAG_MAS_ENABLE; | |
2941 | netdev_info(adapter->netdev, | |
2942 | "MAS: Enabling Media Autosense for port %d\n", | |
2943 | hw->bus.func); | |
2944 | } | |
2945 | break; | |
2946 | case E1000_FUNC_3: | |
2947 | if (eeprom_data & IGB_MAS_ENABLE_3) { | |
2948 | adapter->flags |= IGB_FLAG_MAS_ENABLE; | |
2949 | netdev_info(adapter->netdev, | |
2950 | "MAS: Enabling Media Autosense for port %d\n", | |
2951 | hw->bus.func); | |
2952 | } | |
2953 | break; | |
2954 | default: | |
2955 | /* Shouldn't get here */ | |
2956 | netdev_err(adapter->netdev, | |
2957 | "MAS: Invalid port configuration, returning\n"); | |
2958 | break; | |
2959 | } | |
2960 | } | |
2961 | ||
b980ac18 JK |
2962 | /** |
2963 | * igb_init_i2c - Init I2C interface | |
441fc6fd | 2964 | * @adapter: pointer to adapter structure |
b980ac18 | 2965 | **/ |
441fc6fd CW |
2966 | static s32 igb_init_i2c(struct igb_adapter *adapter) |
2967 | { | |
23d87824 | 2968 | s32 status = 0; |
441fc6fd CW |
2969 | |
2970 | /* I2C interface supported on i350 devices */ | |
2971 | if (adapter->hw.mac.type != e1000_i350) | |
23d87824 | 2972 | return 0; |
441fc6fd CW |
2973 | |
2974 | /* Initialize the i2c bus which is controlled by the registers. | |
2975 | * This bus will use the i2c_algo_bit structue that implements | |
2976 | * the protocol through toggling of the 4 bits in the register. | |
2977 | */ | |
2978 | adapter->i2c_adap.owner = THIS_MODULE; | |
2979 | adapter->i2c_algo = igb_i2c_algo; | |
2980 | adapter->i2c_algo.data = adapter; | |
2981 | adapter->i2c_adap.algo_data = &adapter->i2c_algo; | |
2982 | adapter->i2c_adap.dev.parent = &adapter->pdev->dev; | |
2983 | strlcpy(adapter->i2c_adap.name, "igb BB", | |
2984 | sizeof(adapter->i2c_adap.name)); | |
2985 | status = i2c_bit_add_bus(&adapter->i2c_adap); | |
2986 | return status; | |
2987 | } | |
2988 | ||
9d5c8243 | 2989 | /** |
b980ac18 JK |
2990 | * igb_probe - Device Initialization Routine |
2991 | * @pdev: PCI device information struct | |
2992 | * @ent: entry in igb_pci_tbl | |
9d5c8243 | 2993 | * |
b980ac18 | 2994 | * Returns 0 on success, negative on failure |
9d5c8243 | 2995 | * |
b980ac18 JK |
2996 | * igb_probe initializes an adapter identified by a pci_dev structure. |
2997 | * The OS initialization, configuring of the adapter private structure, | |
2998 | * and a hardware reset occur. | |
9d5c8243 | 2999 | **/ |
1dd06ae8 | 3000 | static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
9d5c8243 AK |
3001 | { |
3002 | struct net_device *netdev; | |
3003 | struct igb_adapter *adapter; | |
3004 | struct e1000_hw *hw; | |
4337e993 | 3005 | u16 eeprom_data = 0; |
9835fd73 | 3006 | s32 ret_val; |
4337e993 | 3007 | static int global_quad_port_a; /* global quad port a indication */ |
9d5c8243 | 3008 | const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; |
2d6a5e95 | 3009 | int err, pci_using_dac; |
9835fd73 | 3010 | u8 part_str[E1000_PBANUM_LENGTH]; |
9d5c8243 | 3011 | |
bded64a7 AG |
3012 | /* Catch broken hardware that put the wrong VF device ID in |
3013 | * the PCIe SR-IOV capability. | |
3014 | */ | |
3015 | if (pdev->is_virtfn) { | |
3016 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | |
f96a8a0b | 3017 | pci_name(pdev), pdev->vendor, pdev->device); |
bded64a7 AG |
3018 | return -EINVAL; |
3019 | } | |
3020 | ||
aed5dec3 | 3021 | err = pci_enable_device_mem(pdev); |
9d5c8243 AK |
3022 | if (err) |
3023 | return err; | |
3024 | ||
3025 | pci_using_dac = 0; | |
dc4ff9bb | 3026 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
9d5c8243 | 3027 | if (!err) { |
dc4ff9bb | 3028 | pci_using_dac = 1; |
9d5c8243 | 3029 | } else { |
dc4ff9bb | 3030 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
9d5c8243 | 3031 | if (err) { |
dc4ff9bb RK |
3032 | dev_err(&pdev->dev, |
3033 | "No usable DMA configuration, aborting\n"); | |
3034 | goto err_dma; | |
9d5c8243 AK |
3035 | } |
3036 | } | |
3037 | ||
56d766d6 | 3038 | err = pci_request_mem_regions(pdev, igb_driver_name); |
9d5c8243 AK |
3039 | if (err) |
3040 | goto err_pci_reg; | |
3041 | ||
19d5afd4 | 3042 | pci_enable_pcie_error_reporting(pdev); |
40a914fa | 3043 | |
9d5c8243 | 3044 | pci_set_master(pdev); |
c682fc23 | 3045 | pci_save_state(pdev); |
9d5c8243 AK |
3046 | |
3047 | err = -ENOMEM; | |
1bfaf07b | 3048 | netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), |
1cc3bd87 | 3049 | IGB_MAX_TX_QUEUES); |
9d5c8243 AK |
3050 | if (!netdev) |
3051 | goto err_alloc_etherdev; | |
3052 | ||
3053 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
3054 | ||
3055 | pci_set_drvdata(pdev, netdev); | |
3056 | adapter = netdev_priv(netdev); | |
3057 | adapter->netdev = netdev; | |
3058 | adapter->pdev = pdev; | |
3059 | hw = &adapter->hw; | |
3060 | hw->back = adapter; | |
b3f4d599 | 3061 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
9d5c8243 | 3062 | |
9d5c8243 | 3063 | err = -EIO; |
73bf8048 JW |
3064 | adapter->io_addr = pci_iomap(pdev, 0, 0); |
3065 | if (!adapter->io_addr) | |
9d5c8243 | 3066 | goto err_ioremap; |
73bf8048 JW |
3067 | /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ |
3068 | hw->hw_addr = adapter->io_addr; | |
9d5c8243 | 3069 | |
2e5c6922 | 3070 | netdev->netdev_ops = &igb_netdev_ops; |
9d5c8243 | 3071 | igb_set_ethtool_ops(netdev); |
9d5c8243 | 3072 | netdev->watchdog_timeo = 5 * HZ; |
9d5c8243 AK |
3073 | |
3074 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | |
3075 | ||
89dbefb2 AS |
3076 | netdev->mem_start = pci_resource_start(pdev, 0); |
3077 | netdev->mem_end = pci_resource_end(pdev, 0); | |
9d5c8243 | 3078 | |
9d5c8243 AK |
3079 | /* PCI config space info */ |
3080 | hw->vendor_id = pdev->vendor; | |
3081 | hw->device_id = pdev->device; | |
3082 | hw->revision_id = pdev->revision; | |
3083 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
3084 | hw->subsystem_device_id = pdev->subsystem_device; | |
3085 | ||
9d5c8243 AK |
3086 | /* Copy the default MAC, PHY and NVM function pointers */ |
3087 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); | |
3088 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | |
3089 | memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); | |
3090 | /* Initialize skew-specific constants */ | |
3091 | err = ei->get_invariants(hw); | |
3092 | if (err) | |
450c87c8 | 3093 | goto err_sw_init; |
9d5c8243 | 3094 | |
450c87c8 | 3095 | /* setup the private structure */ |
9d5c8243 AK |
3096 | err = igb_sw_init(adapter); |
3097 | if (err) | |
3098 | goto err_sw_init; | |
3099 | ||
3100 | igb_get_bus_info_pcie(hw); | |
3101 | ||
3102 | hw->phy.autoneg_wait_to_complete = false; | |
9d5c8243 AK |
3103 | |
3104 | /* Copper options */ | |
3105 | if (hw->phy.media_type == e1000_media_type_copper) { | |
3106 | hw->phy.mdix = AUTO_ALL_MODES; | |
3107 | hw->phy.disable_polarity_correction = false; | |
3108 | hw->phy.ms_type = e1000_ms_hw_default; | |
3109 | } | |
3110 | ||
3111 | if (igb_check_reset_block(hw)) | |
3112 | dev_info(&pdev->dev, | |
3113 | "PHY reset is blocked due to SOL/IDER session.\n"); | |
3114 | ||
b980ac18 | 3115 | /* features is initialized to 0 in allocation, it might have bits |
077887c3 AD |
3116 | * set by igb_sw_init so we should use an or instead of an |
3117 | * assignment. | |
3118 | */ | |
3119 | netdev->features |= NETIF_F_SG | | |
077887c3 AD |
3120 | NETIF_F_TSO | |
3121 | NETIF_F_TSO6 | | |
3122 | NETIF_F_RXHASH | | |
3123 | NETIF_F_RXCSUM | | |
e10715d3 | 3124 | NETIF_F_HW_CSUM; |
077887c3 | 3125 | |
6e033700 | 3126 | if (hw->mac.type >= e1000_82576) |
4085d06d | 3127 | netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; |
6e033700 | 3128 | |
6995ddc4 VCG |
3129 | if (hw->mac.type >= e1000_i350) |
3130 | netdev->features |= NETIF_F_HW_TC; | |
3131 | ||
e10715d3 AD |
3132 | #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ |
3133 | NETIF_F_GSO_GRE_CSUM | \ | |
7e13318d | 3134 | NETIF_F_GSO_IPXIP4 | \ |
bf2d1df3 | 3135 | NETIF_F_GSO_IPXIP6 | \ |
e10715d3 AD |
3136 | NETIF_F_GSO_UDP_TUNNEL | \ |
3137 | NETIF_F_GSO_UDP_TUNNEL_CSUM) | |
3138 | ||
3139 | netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; | |
3140 | netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; | |
3141 | ||
077887c3 | 3142 | /* copy netdev features into list of user selectable features */ |
e10715d3 AD |
3143 | netdev->hw_features |= netdev->features | |
3144 | NETIF_F_HW_VLAN_CTAG_RX | | |
3145 | NETIF_F_HW_VLAN_CTAG_TX | | |
3146 | NETIF_F_RXALL; | |
077887c3 | 3147 | |
6e033700 AD |
3148 | if (hw->mac.type >= e1000_i350) |
3149 | netdev->hw_features |= NETIF_F_NTUPLE; | |
3150 | ||
e10715d3 AD |
3151 | if (pci_using_dac) |
3152 | netdev->features |= NETIF_F_HIGHDMA; | |
6e033700 | 3153 | |
e10715d3 | 3154 | netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; |
6e033700 | 3155 | netdev->mpls_features |= NETIF_F_HW_CSUM; |
e10715d3 | 3156 | netdev->hw_enc_features |= netdev->vlan_features; |
48f29ffc | 3157 | |
e10715d3 AD |
3158 | /* set this bit last since it cannot be part of vlan_features */ |
3159 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | | |
3160 | NETIF_F_HW_VLAN_CTAG_RX | | |
3161 | NETIF_F_HW_VLAN_CTAG_TX; | |
6b8f0922 | 3162 | |
e10715d3 | 3163 | netdev->priv_flags |= IFF_SUPP_NOFCS; |
9d5c8243 | 3164 | |
01789349 JP |
3165 | netdev->priv_flags |= IFF_UNICAST_FLT; |
3166 | ||
91c527a5 JW |
3167 | /* MTU range: 68 - 9216 */ |
3168 | netdev->min_mtu = ETH_MIN_MTU; | |
3169 | netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; | |
3170 | ||
330a6d6a | 3171 | adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); |
9d5c8243 AK |
3172 | |
3173 | /* before reading the NVM, reset the controller to put the device in a | |
b980ac18 JK |
3174 | * known good starting state |
3175 | */ | |
9d5c8243 AK |
3176 | hw->mac.ops.reset_hw(hw); |
3177 | ||
ef3a0092 CW |
3178 | /* make sure the NVM is good , i211/i210 parts can have special NVM |
3179 | * that doesn't contain a checksum | |
f96a8a0b | 3180 | */ |
ef3a0092 CW |
3181 | switch (hw->mac.type) { |
3182 | case e1000_i210: | |
3183 | case e1000_i211: | |
3184 | if (igb_get_flash_presence_i210(hw)) { | |
3185 | if (hw->nvm.ops.validate(hw) < 0) { | |
3186 | dev_err(&pdev->dev, | |
3187 | "The NVM Checksum Is Not Valid\n"); | |
3188 | err = -EIO; | |
3189 | goto err_eeprom; | |
3190 | } | |
3191 | } | |
3192 | break; | |
3193 | default: | |
f96a8a0b CW |
3194 | if (hw->nvm.ops.validate(hw) < 0) { |
3195 | dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); | |
3196 | err = -EIO; | |
3197 | goto err_eeprom; | |
3198 | } | |
ef3a0092 | 3199 | break; |
9d5c8243 AK |
3200 | } |
3201 | ||
806ffb1d JH |
3202 | if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { |
3203 | /* copy the MAC address out of the NVM */ | |
3204 | if (hw->mac.ops.read_mac_addr(hw)) | |
3205 | dev_err(&pdev->dev, "NVM Read Error\n"); | |
3206 | } | |
9d5c8243 AK |
3207 | |
3208 | memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); | |
9d5c8243 | 3209 | |
aaeb6cdf | 3210 | if (!is_valid_ether_addr(netdev->dev_addr)) { |
9d5c8243 AK |
3211 | dev_err(&pdev->dev, "Invalid MAC Address\n"); |
3212 | err = -EIO; | |
3213 | goto err_eeprom; | |
3214 | } | |
3215 | ||
83c21335 YK |
3216 | igb_set_default_mac_filter(adapter); |
3217 | ||
d67974f0 CW |
3218 | /* get firmware version for ethtool -i */ |
3219 | igb_set_fw_version(adapter); | |
3220 | ||
27dff8b2 TF |
3221 | /* configure RXPBSIZE and TXPBSIZE */ |
3222 | if (hw->mac.type == e1000_i210) { | |
3223 | wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); | |
3224 | wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); | |
3225 | } | |
3226 | ||
26566eae KC |
3227 | timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); |
3228 | timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); | |
9d5c8243 AK |
3229 | |
3230 | INIT_WORK(&adapter->reset_task, igb_reset_task); | |
3231 | INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); | |
3232 | ||
450c87c8 | 3233 | /* Initialize link properties that are user-changeable */ |
9d5c8243 AK |
3234 | adapter->fc_autoneg = true; |
3235 | hw->mac.autoneg = true; | |
3236 | hw->phy.autoneg_advertised = 0x2f; | |
3237 | ||
0cce119a AD |
3238 | hw->fc.requested_mode = e1000_fc_default; |
3239 | hw->fc.current_mode = e1000_fc_default; | |
9d5c8243 | 3240 | |
9d5c8243 AK |
3241 | igb_validate_mdi_setting(hw); |
3242 | ||
63d4a8f9 | 3243 | /* By default, support wake on port A */ |
a2cf8b6c | 3244 | if (hw->bus.func == 0) |
63d4a8f9 MV |
3245 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; |
3246 | ||
3247 | /* Check the NVM for wake support on non-port A ports */ | |
3248 | if (hw->mac.type >= e1000_82580) | |
55cac248 | 3249 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + |
b980ac18 JK |
3250 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, |
3251 | &eeprom_data); | |
a2cf8b6c AD |
3252 | else if (hw->bus.func == 1) |
3253 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | |
9d5c8243 | 3254 | |
63d4a8f9 MV |
3255 | if (eeprom_data & IGB_EEPROM_APME) |
3256 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; | |
9d5c8243 AK |
3257 | |
3258 | /* now that we have the eeprom settings, apply the special cases where | |
3259 | * the eeprom may be wrong or the board simply won't support wake on | |
b980ac18 JK |
3260 | * lan on a particular port |
3261 | */ | |
9d5c8243 AK |
3262 | switch (pdev->device) { |
3263 | case E1000_DEV_ID_82575GB_QUAD_COPPER: | |
63d4a8f9 | 3264 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; |
9d5c8243 AK |
3265 | break; |
3266 | case E1000_DEV_ID_82575EB_FIBER_SERDES: | |
2d064c06 AD |
3267 | case E1000_DEV_ID_82576_FIBER: |
3268 | case E1000_DEV_ID_82576_SERDES: | |
9d5c8243 | 3269 | /* Wake events only supported on port A for dual fiber |
b980ac18 JK |
3270 | * regardless of eeprom setting |
3271 | */ | |
9d5c8243 | 3272 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) |
63d4a8f9 | 3273 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; |
9d5c8243 | 3274 | break; |
c8ea5ea9 | 3275 | case E1000_DEV_ID_82576_QUAD_COPPER: |
d5aa2252 | 3276 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: |
c8ea5ea9 AD |
3277 | /* if quad port adapter, disable WoL on all but port A */ |
3278 | if (global_quad_port_a != 0) | |
63d4a8f9 | 3279 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; |
c8ea5ea9 AD |
3280 | else |
3281 | adapter->flags |= IGB_FLAG_QUAD_PORT_A; | |
3282 | /* Reset for multiple quad port adapters */ | |
3283 | if (++global_quad_port_a == 4) | |
3284 | global_quad_port_a = 0; | |
3285 | break; | |
63d4a8f9 MV |
3286 | default: |
3287 | /* If the device can't wake, don't set software support */ | |
3288 | if (!device_can_wakeup(&adapter->pdev->dev)) | |
3289 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; | |
9d5c8243 AK |
3290 | } |
3291 | ||
3292 | /* initialize the wol settings based on the eeprom settings */ | |
63d4a8f9 MV |
3293 | if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) |
3294 | adapter->wol |= E1000_WUFC_MAG; | |
3295 | ||
3296 | /* Some vendors want WoL disabled by default, but still supported */ | |
3297 | if ((hw->mac.type == e1000_i350) && | |
3298 | (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { | |
3299 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; | |
3300 | adapter->wol = 0; | |
3301 | } | |
3302 | ||
5e350b92 TF |
3303 | /* Some vendors want the ability to Use the EEPROM setting as |
3304 | * enable/disable only, and not for capability | |
3305 | */ | |
3306 | if (((hw->mac.type == e1000_i350) || | |
3307 | (hw->mac.type == e1000_i354)) && | |
3308 | (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { | |
3309 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; | |
3310 | adapter->wol = 0; | |
3311 | } | |
3312 | if (hw->mac.type == e1000_i350) { | |
3313 | if (((pdev->subsystem_device == 0x5001) || | |
3314 | (pdev->subsystem_device == 0x5002)) && | |
3315 | (hw->bus.func == 0)) { | |
3316 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; | |
3317 | adapter->wol = 0; | |
3318 | } | |
3319 | if (pdev->subsystem_device == 0x1F52) | |
3320 | adapter->flags |= IGB_FLAG_WOL_SUPPORTED; | |
3321 | } | |
3322 | ||
63d4a8f9 MV |
3323 | device_set_wakeup_enable(&adapter->pdev->dev, |
3324 | adapter->flags & IGB_FLAG_WOL_SUPPORTED); | |
9d5c8243 AK |
3325 | |
3326 | /* reset the hardware with the new settings */ | |
3327 | igb_reset(adapter); | |
3328 | ||
441fc6fd CW |
3329 | /* Init the I2C interface */ |
3330 | err = igb_init_i2c(adapter); | |
3331 | if (err) { | |
3332 | dev_err(&pdev->dev, "failed to init i2c interface\n"); | |
3333 | goto err_eeprom; | |
3334 | } | |
3335 | ||
9d5c8243 | 3336 | /* let the f/w know that the h/w is now under the control of the |
e52c0f96 CW |
3337 | * driver. |
3338 | */ | |
9d5c8243 AK |
3339 | igb_get_hw_control(adapter); |
3340 | ||
9d5c8243 AK |
3341 | strcpy(netdev->name, "eth%d"); |
3342 | err = register_netdev(netdev); | |
3343 | if (err) | |
3344 | goto err_register; | |
3345 | ||
b168dfc5 JB |
3346 | /* carrier off reporting is important to ethtool even BEFORE open */ |
3347 | netif_carrier_off(netdev); | |
3348 | ||
421e02f0 | 3349 | #ifdef CONFIG_IGB_DCA |
bbd98fe4 | 3350 | if (dca_add_requester(&pdev->dev) == 0) { |
7dfc16fa | 3351 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
fe4506b6 | 3352 | dev_info(&pdev->dev, "DCA enabled\n"); |
fe4506b6 JC |
3353 | igb_setup_dca(adapter); |
3354 | } | |
fe4506b6 | 3355 | |
38c845c7 | 3356 | #endif |
e428893b CW |
3357 | #ifdef CONFIG_IGB_HWMON |
3358 | /* Initialize the thermal sensor on i350 devices. */ | |
3359 | if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { | |
3360 | u16 ets_word; | |
3c89f6d0 | 3361 | |
b980ac18 | 3362 | /* Read the NVM to determine if this i350 device supports an |
e428893b CW |
3363 | * external thermal sensor. |
3364 | */ | |
3365 | hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); | |
3366 | if (ets_word != 0x0000 && ets_word != 0xFFFF) | |
3367 | adapter->ets = true; | |
3368 | else | |
3369 | adapter->ets = false; | |
3370 | if (igb_sysfs_init(adapter)) | |
3371 | dev_err(&pdev->dev, | |
3372 | "failed to allocate sysfs resources\n"); | |
3373 | } else { | |
3374 | adapter->ets = false; | |
3375 | } | |
3376 | #endif | |
56cec249 CW |
3377 | /* Check if Media Autosense is enabled */ |
3378 | adapter->ei = *ei; | |
3379 | if (hw->dev_spec._82575.mas_capable) | |
3380 | igb_init_mas(adapter); | |
3381 | ||
673b8b70 | 3382 | /* do hw tstamp init after resetting */ |
7ebae817 | 3383 | igb_ptp_init(adapter); |
673b8b70 | 3384 | |
9d5c8243 | 3385 | dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); |
ceb5f13b CW |
3386 | /* print bus type/speed/width info, not applicable to i354 */ |
3387 | if (hw->mac.type != e1000_i354) { | |
3388 | dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", | |
3389 | netdev->name, | |
3390 | ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : | |
3391 | (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : | |
3392 | "unknown"), | |
3393 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? | |
3394 | "Width x4" : | |
3395 | (hw->bus.width == e1000_bus_width_pcie_x2) ? | |
3396 | "Width x2" : | |
3397 | (hw->bus.width == e1000_bus_width_pcie_x1) ? | |
3398 | "Width x1" : "unknown"), netdev->dev_addr); | |
3399 | } | |
9d5c8243 | 3400 | |
53ea6c7e TF |
3401 | if ((hw->mac.type >= e1000_i210 || |
3402 | igb_get_flash_presence_i210(hw))) { | |
3403 | ret_val = igb_read_part_string(hw, part_str, | |
3404 | E1000_PBANUM_LENGTH); | |
3405 | } else { | |
3406 | ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; | |
3407 | } | |
3408 | ||
9835fd73 CW |
3409 | if (ret_val) |
3410 | strcpy(part_str, "Unknown"); | |
3411 | dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); | |
9d5c8243 AK |
3412 | dev_info(&pdev->dev, |
3413 | "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", | |
cd14ef54 | 3414 | (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : |
7dfc16fa | 3415 | (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", |
9d5c8243 | 3416 | adapter->num_rx_queues, adapter->num_tx_queues); |
f4c01e96 CW |
3417 | if (hw->phy.media_type == e1000_media_type_copper) { |
3418 | switch (hw->mac.type) { | |
3419 | case e1000_i350: | |
3420 | case e1000_i210: | |
3421 | case e1000_i211: | |
3422 | /* Enable EEE for internal copper PHY devices */ | |
c4c112f1 | 3423 | err = igb_set_eee_i350(hw, true, true); |
f4c01e96 CW |
3424 | if ((!err) && |
3425 | (!hw->dev_spec._82575.eee_disable)) { | |
3426 | adapter->eee_advert = | |
3427 | MDIO_EEE_100TX | MDIO_EEE_1000T; | |
3428 | adapter->flags |= IGB_FLAG_EEE; | |
3429 | } | |
3430 | break; | |
3431 | case e1000_i354: | |
ceb5f13b | 3432 | if ((rd32(E1000_CTRL_EXT) & |
f4c01e96 | 3433 | E1000_CTRL_EXT_LINK_MODE_SGMII)) { |
c4c112f1 | 3434 | err = igb_set_eee_i354(hw, true, true); |
f4c01e96 CW |
3435 | if ((!err) && |
3436 | (!hw->dev_spec._82575.eee_disable)) { | |
3437 | adapter->eee_advert = | |
3438 | MDIO_EEE_100TX | MDIO_EEE_1000T; | |
3439 | adapter->flags |= IGB_FLAG_EEE; | |
3440 | } | |
3441 | } | |
3442 | break; | |
3443 | default: | |
3444 | break; | |
ceb5f13b | 3445 | } |
09b068d4 | 3446 | } |
5b6e1321 KHF |
3447 | |
3448 | dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); | |
3449 | ||
749ab2cd | 3450 | pm_runtime_put_noidle(&pdev->dev); |
9d5c8243 AK |
3451 | return 0; |
3452 | ||
3453 | err_register: | |
3454 | igb_release_hw_control(adapter); | |
441fc6fd | 3455 | memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); |
9d5c8243 AK |
3456 | err_eeprom: |
3457 | if (!igb_check_reset_block(hw)) | |
f5f4cf08 | 3458 | igb_reset_phy(hw); |
9d5c8243 AK |
3459 | |
3460 | if (hw->flash_address) | |
3461 | iounmap(hw->flash_address); | |
9d5c8243 | 3462 | err_sw_init: |
83c21335 | 3463 | kfree(adapter->mac_table); |
42ad1a03 | 3464 | kfree(adapter->shadow_vfta); |
047e0030 | 3465 | igb_clear_interrupt_scheme(adapter); |
ceee3450 TF |
3466 | #ifdef CONFIG_PCI_IOV |
3467 | igb_disable_sriov(pdev); | |
3468 | #endif | |
73bf8048 | 3469 | pci_iounmap(pdev, adapter->io_addr); |
9d5c8243 AK |
3470 | err_ioremap: |
3471 | free_netdev(netdev); | |
3472 | err_alloc_etherdev: | |
56d766d6 | 3473 | pci_release_mem_regions(pdev); |
9d5c8243 AK |
3474 | err_pci_reg: |
3475 | err_dma: | |
3476 | pci_disable_device(pdev); | |
3477 | return err; | |
3478 | } | |
3479 | ||
fa44f2f1 | 3480 | #ifdef CONFIG_PCI_IOV |
781798a1 | 3481 | static int igb_disable_sriov(struct pci_dev *pdev) |
fa44f2f1 GR |
3482 | { |
3483 | struct net_device *netdev = pci_get_drvdata(pdev); | |
3484 | struct igb_adapter *adapter = netdev_priv(netdev); | |
3485 | struct e1000_hw *hw = &adapter->hw; | |
3486 | ||
3487 | /* reclaim resources allocated to VFs */ | |
3488 | if (adapter->vf_data) { | |
3489 | /* disable iov and allow time for transactions to clear */ | |
b09186d2 | 3490 | if (pci_vfs_assigned(pdev)) { |
fa44f2f1 GR |
3491 | dev_warn(&pdev->dev, |
3492 | "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); | |
3493 | return -EPERM; | |
3494 | } else { | |
3495 | pci_disable_sriov(pdev); | |
3496 | msleep(500); | |
3497 | } | |
3498 | ||
4827cc37 YK |
3499 | kfree(adapter->vf_mac_list); |
3500 | adapter->vf_mac_list = NULL; | |
fa44f2f1 GR |
3501 | kfree(adapter->vf_data); |
3502 | adapter->vf_data = NULL; | |
3503 | adapter->vfs_allocated_count = 0; | |
3504 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | |
3505 | wrfl(); | |
3506 | msleep(100); | |
3507 | dev_info(&pdev->dev, "IOV Disabled\n"); | |
3508 | ||
3509 | /* Re-enable DMA Coalescing flag since IOV is turned off */ | |
3510 | adapter->flags |= IGB_FLAG_DMAC; | |
3511 | } | |
3512 | ||
3513 | return 0; | |
3514 | } | |
3515 | ||
3516 | static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) | |
3517 | { | |
3518 | struct net_device *netdev = pci_get_drvdata(pdev); | |
3519 | struct igb_adapter *adapter = netdev_priv(netdev); | |
3520 | int old_vfs = pci_num_vf(pdev); | |
4827cc37 | 3521 | struct vf_mac_filter *mac_list; |
fa44f2f1 | 3522 | int err = 0; |
4827cc37 | 3523 | int num_vf_mac_filters, i; |
fa44f2f1 | 3524 | |
cd14ef54 | 3525 | if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { |
50267196 MW |
3526 | err = -EPERM; |
3527 | goto out; | |
3528 | } | |
fa44f2f1 GR |
3529 | if (!num_vfs) |
3530 | goto out; | |
fa44f2f1 | 3531 | |
781798a1 SA |
3532 | if (old_vfs) { |
3533 | dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", | |
3534 | old_vfs, max_vfs); | |
3535 | adapter->vfs_allocated_count = old_vfs; | |
3536 | } else | |
3537 | adapter->vfs_allocated_count = num_vfs; | |
fa44f2f1 GR |
3538 | |
3539 | adapter->vf_data = kcalloc(adapter->vfs_allocated_count, | |
3540 | sizeof(struct vf_data_storage), GFP_KERNEL); | |
3541 | ||
3542 | /* if allocation failed then we do not support SR-IOV */ | |
3543 | if (!adapter->vf_data) { | |
3544 | adapter->vfs_allocated_count = 0; | |
fa44f2f1 GR |
3545 | err = -ENOMEM; |
3546 | goto out; | |
3547 | } | |
3548 | ||
4827cc37 YK |
3549 | /* Due to the limited number of RAR entries calculate potential |
3550 | * number of MAC filters available for the VFs. Reserve entries | |
3551 | * for PF default MAC, PF MAC filters and at least one RAR entry | |
3552 | * for each VF for VF MAC. | |
3553 | */ | |
3554 | num_vf_mac_filters = adapter->hw.mac.rar_entry_count - | |
3555 | (1 + IGB_PF_MAC_FILTERS_RESERVED + | |
3556 | adapter->vfs_allocated_count); | |
3557 | ||
3558 | adapter->vf_mac_list = kcalloc(num_vf_mac_filters, | |
3559 | sizeof(struct vf_mac_filter), | |
3560 | GFP_KERNEL); | |
3561 | ||
3562 | mac_list = adapter->vf_mac_list; | |
3563 | INIT_LIST_HEAD(&adapter->vf_macs.l); | |
3564 | ||
3565 | if (adapter->vf_mac_list) { | |
3566 | /* Initialize list of VF MAC filters */ | |
3567 | for (i = 0; i < num_vf_mac_filters; i++) { | |
3568 | mac_list->vf = -1; | |
3569 | mac_list->free = true; | |
3570 | list_add(&mac_list->l, &adapter->vf_macs.l); | |
3571 | mac_list++; | |
3572 | } | |
3573 | } else { | |
3574 | /* If we could not allocate memory for the VF MAC filters | |
3575 | * we can continue without this feature but warn user. | |
3576 | */ | |
3577 | dev_err(&pdev->dev, | |
3578 | "Unable to allocate memory for VF MAC filter list\n"); | |
3579 | } | |
3580 | ||
781798a1 SA |
3581 | /* only call pci_enable_sriov() if no VFs are allocated already */ |
3582 | if (!old_vfs) { | |
3583 | err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); | |
3584 | if (err) | |
3585 | goto err_out; | |
3586 | } | |
fa44f2f1 GR |
3587 | dev_info(&pdev->dev, "%d VFs allocated\n", |
3588 | adapter->vfs_allocated_count); | |
3589 | for (i = 0; i < adapter->vfs_allocated_count; i++) | |
3590 | igb_vf_configure(adapter, i); | |
3591 | ||
3592 | /* DMA Coalescing is not supported in IOV mode. */ | |
3593 | adapter->flags &= ~IGB_FLAG_DMAC; | |
3594 | goto out; | |
3595 | ||
3596 | err_out: | |
4827cc37 YK |
3597 | kfree(adapter->vf_mac_list); |
3598 | adapter->vf_mac_list = NULL; | |
fa44f2f1 GR |
3599 | kfree(adapter->vf_data); |
3600 | adapter->vf_data = NULL; | |
3601 | adapter->vfs_allocated_count = 0; | |
3602 | out: | |
3603 | return err; | |
3604 | } | |
3605 | ||
3606 | #endif | |
b980ac18 | 3607 | /** |
441fc6fd CW |
3608 | * igb_remove_i2c - Cleanup I2C interface |
3609 | * @adapter: pointer to adapter structure | |
b980ac18 | 3610 | **/ |
441fc6fd CW |
3611 | static void igb_remove_i2c(struct igb_adapter *adapter) |
3612 | { | |
441fc6fd CW |
3613 | /* free the adapter bus structure */ |
3614 | i2c_del_adapter(&adapter->i2c_adap); | |
3615 | } | |
3616 | ||
9d5c8243 | 3617 | /** |
b980ac18 JK |
3618 | * igb_remove - Device Removal Routine |
3619 | * @pdev: PCI device information struct | |
9d5c8243 | 3620 | * |
b980ac18 JK |
3621 | * igb_remove is called by the PCI subsystem to alert the driver |
3622 | * that it should release a PCI device. The could be caused by a | |
3623 | * Hot-Plug event, or because the driver is going to be removed from | |
3624 | * memory. | |
9d5c8243 | 3625 | **/ |
9f9a12f8 | 3626 | static void igb_remove(struct pci_dev *pdev) |
9d5c8243 AK |
3627 | { |
3628 | struct net_device *netdev = pci_get_drvdata(pdev); | |
3629 | struct igb_adapter *adapter = netdev_priv(netdev); | |
fe4506b6 | 3630 | struct e1000_hw *hw = &adapter->hw; |
9d5c8243 | 3631 | |
749ab2cd | 3632 | pm_runtime_get_noresume(&pdev->dev); |
e428893b CW |
3633 | #ifdef CONFIG_IGB_HWMON |
3634 | igb_sysfs_exit(adapter); | |
3635 | #endif | |
441fc6fd | 3636 | igb_remove_i2c(adapter); |
a79f4f88 | 3637 | igb_ptp_stop(adapter); |
b980ac18 | 3638 | /* The watchdog timer may be rescheduled, so explicitly |
760141a5 TH |
3639 | * disable watchdog from being rescheduled. |
3640 | */ | |
9d5c8243 AK |
3641 | set_bit(__IGB_DOWN, &adapter->state); |
3642 | del_timer_sync(&adapter->watchdog_timer); | |
3643 | del_timer_sync(&adapter->phy_info_timer); | |
3644 | ||
760141a5 TH |
3645 | cancel_work_sync(&adapter->reset_task); |
3646 | cancel_work_sync(&adapter->watchdog_task); | |
9d5c8243 | 3647 | |
421e02f0 | 3648 | #ifdef CONFIG_IGB_DCA |
7dfc16fa | 3649 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
fe4506b6 JC |
3650 | dev_info(&pdev->dev, "DCA disabled\n"); |
3651 | dca_remove_requester(&pdev->dev); | |
7dfc16fa | 3652 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; |
cbd347ad | 3653 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); |
fe4506b6 JC |
3654 | } |
3655 | #endif | |
3656 | ||
9d5c8243 | 3657 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
b980ac18 JK |
3658 | * would have already happened in close and is redundant. |
3659 | */ | |
9d5c8243 AK |
3660 | igb_release_hw_control(adapter); |
3661 | ||
37680117 | 3662 | #ifdef CONFIG_PCI_IOV |
fa44f2f1 | 3663 | igb_disable_sriov(pdev); |
37680117 | 3664 | #endif |
559e9c49 | 3665 | |
c23d92b8 AW |
3666 | unregister_netdev(netdev); |
3667 | ||
3668 | igb_clear_interrupt_scheme(adapter); | |
3669 | ||
73bf8048 | 3670 | pci_iounmap(pdev, adapter->io_addr); |
28b0759c AD |
3671 | if (hw->flash_address) |
3672 | iounmap(hw->flash_address); | |
56d766d6 | 3673 | pci_release_mem_regions(pdev); |
9d5c8243 | 3674 | |
83c21335 | 3675 | kfree(adapter->mac_table); |
1128c756 | 3676 | kfree(adapter->shadow_vfta); |
9d5c8243 AK |
3677 | free_netdev(netdev); |
3678 | ||
19d5afd4 | 3679 | pci_disable_pcie_error_reporting(pdev); |
40a914fa | 3680 | |
9d5c8243 AK |
3681 | pci_disable_device(pdev); |
3682 | } | |
3683 | ||
a6b623e0 | 3684 | /** |
b980ac18 JK |
3685 | * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space |
3686 | * @adapter: board private structure to initialize | |
a6b623e0 | 3687 | * |
b980ac18 JK |
3688 | * This function initializes the vf specific data storage and then attempts to |
3689 | * allocate the VFs. The reason for ordering it this way is because it is much | |
3690 | * mor expensive time wise to disable SR-IOV than it is to allocate and free | |
3691 | * the memory for the VFs. | |
a6b623e0 | 3692 | **/ |
9f9a12f8 | 3693 | static void igb_probe_vfs(struct igb_adapter *adapter) |
a6b623e0 AD |
3694 | { |
3695 | #ifdef CONFIG_PCI_IOV | |
3696 | struct pci_dev *pdev = adapter->pdev; | |
f96a8a0b | 3697 | struct e1000_hw *hw = &adapter->hw; |
a6b623e0 | 3698 | |
f96a8a0b CW |
3699 | /* Virtualization features not supported on i210 family. */ |
3700 | if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) | |
3701 | return; | |
3702 | ||
be06998f JB |
3703 | /* Of the below we really only want the effect of getting |
3704 | * IGB_FLAG_HAS_MSIX set (if available), without which | |
3705 | * igb_enable_sriov() has no effect. | |
3706 | */ | |
3707 | igb_set_interrupt_capability(adapter, true); | |
3708 | igb_reset_interrupt_capability(adapter); | |
3709 | ||
fa44f2f1 | 3710 | pci_sriov_set_totalvfs(pdev, 7); |
6423fc34 | 3711 | igb_enable_sriov(pdev, max_vfs); |
0224d663 | 3712 | |
a6b623e0 AD |
3713 | #endif /* CONFIG_PCI_IOV */ |
3714 | } | |
3715 | ||
28cb2d1b | 3716 | unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) |
9d5c8243 AK |
3717 | { |
3718 | struct e1000_hw *hw = &adapter->hw; | |
28cb2d1b | 3719 | unsigned int max_rss_queues; |
9d5c8243 | 3720 | |
374a542d | 3721 | /* Determine the maximum number of RSS queues supported. */ |
f96a8a0b | 3722 | switch (hw->mac.type) { |
374a542d MV |
3723 | case e1000_i211: |
3724 | max_rss_queues = IGB_MAX_RX_QUEUES_I211; | |
3725 | break; | |
3726 | case e1000_82575: | |
f96a8a0b | 3727 | case e1000_i210: |
374a542d MV |
3728 | max_rss_queues = IGB_MAX_RX_QUEUES_82575; |
3729 | break; | |
3730 | case e1000_i350: | |
3731 | /* I350 cannot do RSS and SR-IOV at the same time */ | |
3732 | if (!!adapter->vfs_allocated_count) { | |
3733 | max_rss_queues = 1; | |
3734 | break; | |
3735 | } | |
3736 | /* fall through */ | |
3737 | case e1000_82576: | |
3738 | if (!!adapter->vfs_allocated_count) { | |
3739 | max_rss_queues = 2; | |
3740 | break; | |
3741 | } | |
3742 | /* fall through */ | |
3743 | case e1000_82580: | |
ceb5f13b | 3744 | case e1000_i354: |
374a542d MV |
3745 | default: |
3746 | max_rss_queues = IGB_MAX_RX_QUEUES; | |
f96a8a0b | 3747 | break; |
374a542d MV |
3748 | } |
3749 | ||
28cb2d1b ZS |
3750 | return max_rss_queues; |
3751 | } | |
3752 | ||
3753 | static void igb_init_queue_configuration(struct igb_adapter *adapter) | |
3754 | { | |
3755 | u32 max_rss_queues; | |
3756 | ||
3757 | max_rss_queues = igb_get_max_rss_queues(adapter); | |
374a542d MV |
3758 | adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); |
3759 | ||
72ddef05 SS |
3760 | igb_set_flag_queue_pairs(adapter, max_rss_queues); |
3761 | } | |
3762 | ||
3763 | void igb_set_flag_queue_pairs(struct igb_adapter *adapter, | |
3764 | const u32 max_rss_queues) | |
3765 | { | |
3766 | struct e1000_hw *hw = &adapter->hw; | |
3767 | ||
374a542d MV |
3768 | /* Determine if we need to pair queues. */ |
3769 | switch (hw->mac.type) { | |
3770 | case e1000_82575: | |
f96a8a0b | 3771 | case e1000_i211: |
374a542d | 3772 | /* Device supports enough interrupts without queue pairing. */ |
f96a8a0b | 3773 | break; |
374a542d | 3774 | case e1000_82576: |
374a542d MV |
3775 | case e1000_82580: |
3776 | case e1000_i350: | |
ceb5f13b | 3777 | case e1000_i354: |
374a542d | 3778 | case e1000_i210: |
f96a8a0b | 3779 | default: |
b980ac18 | 3780 | /* If rss_queues > half of max_rss_queues, pair the queues in |
374a542d MV |
3781 | * order to conserve interrupts due to limited supply. |
3782 | */ | |
3783 | if (adapter->rss_queues > (max_rss_queues / 2)) | |
3784 | adapter->flags |= IGB_FLAG_QUEUE_PAIRS; | |
37a5d163 SS |
3785 | else |
3786 | adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; | |
f96a8a0b CW |
3787 | break; |
3788 | } | |
fa44f2f1 GR |
3789 | } |
3790 | ||
3791 | /** | |
b980ac18 JK |
3792 | * igb_sw_init - Initialize general software structures (struct igb_adapter) |
3793 | * @adapter: board private structure to initialize | |
fa44f2f1 | 3794 | * |
b980ac18 JK |
3795 | * igb_sw_init initializes the Adapter private data structure. |
3796 | * Fields are initialized based on PCI device information and | |
3797 | * OS network device settings (MTU size). | |
fa44f2f1 GR |
3798 | **/ |
3799 | static int igb_sw_init(struct igb_adapter *adapter) | |
3800 | { | |
3801 | struct e1000_hw *hw = &adapter->hw; | |
3802 | struct net_device *netdev = adapter->netdev; | |
3803 | struct pci_dev *pdev = adapter->pdev; | |
3804 | ||
3805 | pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); | |
3806 | ||
3807 | /* set default ring sizes */ | |
3808 | adapter->tx_ring_count = IGB_DEFAULT_TXD; | |
3809 | adapter->rx_ring_count = IGB_DEFAULT_RXD; | |
3810 | ||
3811 | /* set default ITR values */ | |
3812 | adapter->rx_itr_setting = IGB_DEFAULT_ITR; | |
3813 | adapter->tx_itr_setting = IGB_DEFAULT_ITR; | |
3814 | ||
3815 | /* set default work limits */ | |
3816 | adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; | |
3817 | ||
3818 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + | |
3819 | VLAN_HLEN; | |
3820 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | |
3821 | ||
0e71def2 | 3822 | spin_lock_init(&adapter->nfc_lock); |
5642e27b | 3823 | spin_lock_init(&adapter->stats64_lock); |
fa44f2f1 GR |
3824 | #ifdef CONFIG_PCI_IOV |
3825 | switch (hw->mac.type) { | |
3826 | case e1000_82576: | |
3827 | case e1000_i350: | |
3828 | if (max_vfs > 7) { | |
3829 | dev_warn(&pdev->dev, | |
3830 | "Maximum of 7 VFs per PF, using max\n"); | |
d0f63acc | 3831 | max_vfs = adapter->vfs_allocated_count = 7; |
fa44f2f1 GR |
3832 | } else |
3833 | adapter->vfs_allocated_count = max_vfs; | |
3834 | if (adapter->vfs_allocated_count) | |
3835 | dev_warn(&pdev->dev, | |
3836 | "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); | |
3837 | break; | |
3838 | default: | |
3839 | break; | |
3840 | } | |
3841 | #endif /* CONFIG_PCI_IOV */ | |
3842 | ||
cbfe360a SA |
3843 | /* Assume MSI-X interrupts, will be checked during IRQ allocation */ |
3844 | adapter->flags |= IGB_FLAG_HAS_MSIX; | |
3845 | ||
6396bb22 KC |
3846 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, |
3847 | sizeof(struct igb_mac_addr), | |
15135627 | 3848 | GFP_KERNEL); |
83c21335 YK |
3849 | if (!adapter->mac_table) |
3850 | return -ENOMEM; | |
3851 | ||
ceee3450 TF |
3852 | igb_probe_vfs(adapter); |
3853 | ||
fa44f2f1 | 3854 | igb_init_queue_configuration(adapter); |
a99955fc | 3855 | |
1128c756 | 3856 | /* Setup and initialize a copy of the hw vlan table array */ |
b2adaca9 | 3857 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), |
15135627 | 3858 | GFP_KERNEL); |
18eb8636 CJ |
3859 | if (!adapter->shadow_vfta) |
3860 | return -ENOMEM; | |
1128c756 | 3861 | |
a6b623e0 | 3862 | /* This call may decrease the number of queues */ |
53c7d064 | 3863 | if (igb_init_interrupt_scheme(adapter, true)) { |
9d5c8243 AK |
3864 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
3865 | return -ENOMEM; | |
3866 | } | |
3867 | ||
3868 | /* Explicitly disable IRQ since the NIC can be in any state. */ | |
3869 | igb_irq_disable(adapter); | |
3870 | ||
f96a8a0b | 3871 | if (hw->mac.type >= e1000_i350) |
831ec0b4 CW |
3872 | adapter->flags &= ~IGB_FLAG_DMAC; |
3873 | ||
9d5c8243 AK |
3874 | set_bit(__IGB_DOWN, &adapter->state); |
3875 | return 0; | |
3876 | } | |
3877 | ||
3878 | /** | |
b980ac18 JK |
3879 | * igb_open - Called when a network interface is made active |
3880 | * @netdev: network interface device structure | |
9d5c8243 | 3881 | * |
b980ac18 | 3882 | * Returns 0 on success, negative value on failure |
9d5c8243 | 3883 | * |
b980ac18 JK |
3884 | * The open entry point is called when a network interface is made |
3885 | * active by the system (IFF_UP). At this point all resources needed | |
3886 | * for transmit and receive operations are allocated, the interrupt | |
3887 | * handler is registered with the OS, the watchdog timer is started, | |
3888 | * and the stack is notified that the interface is ready. | |
9d5c8243 | 3889 | **/ |
749ab2cd | 3890 | static int __igb_open(struct net_device *netdev, bool resuming) |
9d5c8243 AK |
3891 | { |
3892 | struct igb_adapter *adapter = netdev_priv(netdev); | |
3893 | struct e1000_hw *hw = &adapter->hw; | |
749ab2cd | 3894 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
3895 | int err; |
3896 | int i; | |
3897 | ||
3898 | /* disallow open during test */ | |
749ab2cd YZ |
3899 | if (test_bit(__IGB_TESTING, &adapter->state)) { |
3900 | WARN_ON(resuming); | |
9d5c8243 | 3901 | return -EBUSY; |
749ab2cd YZ |
3902 | } |
3903 | ||
3904 | if (!resuming) | |
3905 | pm_runtime_get_sync(&pdev->dev); | |
9d5c8243 | 3906 | |
b168dfc5 JB |
3907 | netif_carrier_off(netdev); |
3908 | ||
9d5c8243 AK |
3909 | /* allocate transmit descriptors */ |
3910 | err = igb_setup_all_tx_resources(adapter); | |
3911 | if (err) | |
3912 | goto err_setup_tx; | |
3913 | ||
3914 | /* allocate receive descriptors */ | |
3915 | err = igb_setup_all_rx_resources(adapter); | |
3916 | if (err) | |
3917 | goto err_setup_rx; | |
3918 | ||
88a268c1 | 3919 | igb_power_up_link(adapter); |
9d5c8243 | 3920 | |
9d5c8243 AK |
3921 | /* before we allocate an interrupt, we must be ready to handle it. |
3922 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | |
3923 | * as soon as we call pci_request_irq, so we have to setup our | |
b980ac18 JK |
3924 | * clean_rx handler before we do so. |
3925 | */ | |
9d5c8243 AK |
3926 | igb_configure(adapter); |
3927 | ||
3928 | err = igb_request_irq(adapter); | |
3929 | if (err) | |
3930 | goto err_req_irq; | |
3931 | ||
0c2cc02e AD |
3932 | /* Notify the stack of the actual queue counts. */ |
3933 | err = netif_set_real_num_tx_queues(adapter->netdev, | |
3934 | adapter->num_tx_queues); | |
3935 | if (err) | |
3936 | goto err_set_queues; | |
3937 | ||
3938 | err = netif_set_real_num_rx_queues(adapter->netdev, | |
3939 | adapter->num_rx_queues); | |
3940 | if (err) | |
3941 | goto err_set_queues; | |
3942 | ||
9d5c8243 AK |
3943 | /* From here on the code is the same as igb_up() */ |
3944 | clear_bit(__IGB_DOWN, &adapter->state); | |
3945 | ||
0d1ae7f4 AD |
3946 | for (i = 0; i < adapter->num_q_vectors; i++) |
3947 | napi_enable(&(adapter->q_vector[i]->napi)); | |
9d5c8243 AK |
3948 | |
3949 | /* Clear any pending interrupts. */ | |
1ec2297c | 3950 | rd32(E1000_TSICR); |
9d5c8243 | 3951 | rd32(E1000_ICR); |
844290e5 PW |
3952 | |
3953 | igb_irq_enable(adapter); | |
3954 | ||
d4960307 AD |
3955 | /* notify VFs that reset has been completed */ |
3956 | if (adapter->vfs_allocated_count) { | |
3957 | u32 reg_data = rd32(E1000_CTRL_EXT); | |
9005df38 | 3958 | |
d4960307 AD |
3959 | reg_data |= E1000_CTRL_EXT_PFRSTD; |
3960 | wr32(E1000_CTRL_EXT, reg_data); | |
3961 | } | |
3962 | ||
d55b53ff JK |
3963 | netif_tx_start_all_queues(netdev); |
3964 | ||
749ab2cd YZ |
3965 | if (!resuming) |
3966 | pm_runtime_put(&pdev->dev); | |
3967 | ||
25568a53 AD |
3968 | /* start the watchdog. */ |
3969 | hw->mac.get_link_status = 1; | |
3970 | schedule_work(&adapter->watchdog_task); | |
9d5c8243 AK |
3971 | |
3972 | return 0; | |
3973 | ||
0c2cc02e AD |
3974 | err_set_queues: |
3975 | igb_free_irq(adapter); | |
9d5c8243 AK |
3976 | err_req_irq: |
3977 | igb_release_hw_control(adapter); | |
88a268c1 | 3978 | igb_power_down_link(adapter); |
9d5c8243 AK |
3979 | igb_free_all_rx_resources(adapter); |
3980 | err_setup_rx: | |
3981 | igb_free_all_tx_resources(adapter); | |
3982 | err_setup_tx: | |
3983 | igb_reset(adapter); | |
749ab2cd YZ |
3984 | if (!resuming) |
3985 | pm_runtime_put(&pdev->dev); | |
9d5c8243 AK |
3986 | |
3987 | return err; | |
3988 | } | |
3989 | ||
46eafa59 | 3990 | int igb_open(struct net_device *netdev) |
749ab2cd YZ |
3991 | { |
3992 | return __igb_open(netdev, false); | |
3993 | } | |
3994 | ||
9d5c8243 | 3995 | /** |
b980ac18 JK |
3996 | * igb_close - Disables a network interface |
3997 | * @netdev: network interface device structure | |
9d5c8243 | 3998 | * |
b980ac18 | 3999 | * Returns 0, this is not allowed to fail |
9d5c8243 | 4000 | * |
b980ac18 JK |
4001 | * The close entry point is called when an interface is de-activated |
4002 | * by the OS. The hardware is still under the driver's control, but | |
4003 | * needs to be disabled. A global MAC reset is issued to stop the | |
4004 | * hardware, and all transmit and receive resources are freed. | |
9d5c8243 | 4005 | **/ |
749ab2cd | 4006 | static int __igb_close(struct net_device *netdev, bool suspending) |
9d5c8243 AK |
4007 | { |
4008 | struct igb_adapter *adapter = netdev_priv(netdev); | |
749ab2cd | 4009 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
4010 | |
4011 | WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); | |
9d5c8243 | 4012 | |
749ab2cd YZ |
4013 | if (!suspending) |
4014 | pm_runtime_get_sync(&pdev->dev); | |
4015 | ||
4016 | igb_down(adapter); | |
9d5c8243 AK |
4017 | igb_free_irq(adapter); |
4018 | ||
4019 | igb_free_all_tx_resources(adapter); | |
4020 | igb_free_all_rx_resources(adapter); | |
4021 | ||
749ab2cd YZ |
4022 | if (!suspending) |
4023 | pm_runtime_put_sync(&pdev->dev); | |
9d5c8243 AK |
4024 | return 0; |
4025 | } | |
4026 | ||
46eafa59 | 4027 | int igb_close(struct net_device *netdev) |
749ab2cd | 4028 | { |
888f2293 | 4029 | if (netif_device_present(netdev) || netdev->dismantle) |
9474933c TF |
4030 | return __igb_close(netdev, false); |
4031 | return 0; | |
749ab2cd YZ |
4032 | } |
4033 | ||
9d5c8243 | 4034 | /** |
b980ac18 JK |
4035 | * igb_setup_tx_resources - allocate Tx resources (Descriptors) |
4036 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | |
9d5c8243 | 4037 | * |
b980ac18 | 4038 | * Return 0 on success, negative on failure |
9d5c8243 | 4039 | **/ |
80785298 | 4040 | int igb_setup_tx_resources(struct igb_ring *tx_ring) |
9d5c8243 | 4041 | { |
59d71989 | 4042 | struct device *dev = tx_ring->dev; |
9d5c8243 AK |
4043 | int size; |
4044 | ||
06034649 | 4045 | size = sizeof(struct igb_tx_buffer) * tx_ring->count; |
f33005a6 | 4046 | |
7cc6fd4c | 4047 | tx_ring->tx_buffer_info = vmalloc(size); |
06034649 | 4048 | if (!tx_ring->tx_buffer_info) |
9d5c8243 | 4049 | goto err; |
9d5c8243 AK |
4050 | |
4051 | /* round up to nearest 4K */ | |
85e8d004 | 4052 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); |
9d5c8243 AK |
4053 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
4054 | ||
5536d210 AD |
4055 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, |
4056 | &tx_ring->dma, GFP_KERNEL); | |
9d5c8243 AK |
4057 | if (!tx_ring->desc) |
4058 | goto err; | |
4059 | ||
9d5c8243 AK |
4060 | tx_ring->next_to_use = 0; |
4061 | tx_ring->next_to_clean = 0; | |
81c2fc22 | 4062 | |
9d5c8243 AK |
4063 | return 0; |
4064 | ||
4065 | err: | |
06034649 | 4066 | vfree(tx_ring->tx_buffer_info); |
f33005a6 AD |
4067 | tx_ring->tx_buffer_info = NULL; |
4068 | dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); | |
9d5c8243 AK |
4069 | return -ENOMEM; |
4070 | } | |
4071 | ||
4072 | /** | |
b980ac18 JK |
4073 | * igb_setup_all_tx_resources - wrapper to allocate Tx resources |
4074 | * (Descriptors) for all queues | |
4075 | * @adapter: board private structure | |
9d5c8243 | 4076 | * |
b980ac18 | 4077 | * Return 0 on success, negative on failure |
9d5c8243 AK |
4078 | **/ |
4079 | static int igb_setup_all_tx_resources(struct igb_adapter *adapter) | |
4080 | { | |
439705e1 | 4081 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
4082 | int i, err = 0; |
4083 | ||
4084 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
3025a446 | 4085 | err = igb_setup_tx_resources(adapter->tx_ring[i]); |
9d5c8243 | 4086 | if (err) { |
439705e1 | 4087 | dev_err(&pdev->dev, |
9d5c8243 AK |
4088 | "Allocation for Tx Queue %u failed\n", i); |
4089 | for (i--; i >= 0; i--) | |
3025a446 | 4090 | igb_free_tx_resources(adapter->tx_ring[i]); |
9d5c8243 AK |
4091 | break; |
4092 | } | |
4093 | } | |
4094 | ||
4095 | return err; | |
4096 | } | |
4097 | ||
4098 | /** | |
b980ac18 JK |
4099 | * igb_setup_tctl - configure the transmit control registers |
4100 | * @adapter: Board private structure | |
9d5c8243 | 4101 | **/ |
d7ee5b3a | 4102 | void igb_setup_tctl(struct igb_adapter *adapter) |
9d5c8243 | 4103 | { |
9d5c8243 AK |
4104 | struct e1000_hw *hw = &adapter->hw; |
4105 | u32 tctl; | |
9d5c8243 | 4106 | |
85b430b4 AD |
4107 | /* disable queue 0 which is enabled by default on 82575 and 82576 */ |
4108 | wr32(E1000_TXDCTL(0), 0); | |
9d5c8243 AK |
4109 | |
4110 | /* Program the Transmit Control Register */ | |
9d5c8243 AK |
4111 | tctl = rd32(E1000_TCTL); |
4112 | tctl &= ~E1000_TCTL_CT; | |
4113 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | |
4114 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | |
4115 | ||
4116 | igb_config_collision_dist(hw); | |
4117 | ||
9d5c8243 AK |
4118 | /* Enable transmits */ |
4119 | tctl |= E1000_TCTL_EN; | |
4120 | ||
4121 | wr32(E1000_TCTL, tctl); | |
4122 | } | |
4123 | ||
85b430b4 | 4124 | /** |
b980ac18 JK |
4125 | * igb_configure_tx_ring - Configure transmit ring after Reset |
4126 | * @adapter: board private structure | |
4127 | * @ring: tx ring to configure | |
85b430b4 | 4128 | * |
b980ac18 | 4129 | * Configure a transmit ring after a reset. |
85b430b4 | 4130 | **/ |
d7ee5b3a | 4131 | void igb_configure_tx_ring(struct igb_adapter *adapter, |
9005df38 | 4132 | struct igb_ring *ring) |
85b430b4 AD |
4133 | { |
4134 | struct e1000_hw *hw = &adapter->hw; | |
a74420e0 | 4135 | u32 txdctl = 0; |
85b430b4 AD |
4136 | u64 tdba = ring->dma; |
4137 | int reg_idx = ring->reg_idx; | |
4138 | ||
85b430b4 | 4139 | wr32(E1000_TDLEN(reg_idx), |
b980ac18 | 4140 | ring->count * sizeof(union e1000_adv_tx_desc)); |
85b430b4 | 4141 | wr32(E1000_TDBAL(reg_idx), |
b980ac18 | 4142 | tdba & 0x00000000ffffffffULL); |
85b430b4 AD |
4143 | wr32(E1000_TDBAH(reg_idx), tdba >> 32); |
4144 | ||
629823b8 | 4145 | ring->tail = adapter->io_addr + E1000_TDT(reg_idx); |
a74420e0 | 4146 | wr32(E1000_TDH(reg_idx), 0); |
fce99e34 | 4147 | writel(0, ring->tail); |
85b430b4 AD |
4148 | |
4149 | txdctl |= IGB_TX_PTHRESH; | |
4150 | txdctl |= IGB_TX_HTHRESH << 8; | |
4151 | txdctl |= IGB_TX_WTHRESH << 16; | |
4152 | ||
7cc6fd4c AD |
4153 | /* reinitialize tx_buffer_info */ |
4154 | memset(ring->tx_buffer_info, 0, | |
4155 | sizeof(struct igb_tx_buffer) * ring->count); | |
4156 | ||
85b430b4 AD |
4157 | txdctl |= E1000_TXDCTL_QUEUE_ENABLE; |
4158 | wr32(E1000_TXDCTL(reg_idx), txdctl); | |
4159 | } | |
4160 | ||
4161 | /** | |
b980ac18 JK |
4162 | * igb_configure_tx - Configure transmit Unit after Reset |
4163 | * @adapter: board private structure | |
85b430b4 | 4164 | * |
b980ac18 | 4165 | * Configure the Tx unit of the MAC after a reset. |
85b430b4 AD |
4166 | **/ |
4167 | static void igb_configure_tx(struct igb_adapter *adapter) | |
4168 | { | |
06140c79 | 4169 | struct e1000_hw *hw = &adapter->hw; |
85b430b4 AD |
4170 | int i; |
4171 | ||
06140c79 SN |
4172 | /* disable the queues */ |
4173 | for (i = 0; i < adapter->num_tx_queues; i++) | |
4174 | wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); | |
4175 | ||
4176 | wrfl(); | |
4177 | usleep_range(10000, 20000); | |
4178 | ||
85b430b4 | 4179 | for (i = 0; i < adapter->num_tx_queues; i++) |
3025a446 | 4180 | igb_configure_tx_ring(adapter, adapter->tx_ring[i]); |
85b430b4 AD |
4181 | } |
4182 | ||
9d5c8243 | 4183 | /** |
b980ac18 JK |
4184 | * igb_setup_rx_resources - allocate Rx resources (Descriptors) |
4185 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup | |
9d5c8243 | 4186 | * |
b980ac18 | 4187 | * Returns 0 on success, negative on failure |
9d5c8243 | 4188 | **/ |
80785298 | 4189 | int igb_setup_rx_resources(struct igb_ring *rx_ring) |
9d5c8243 | 4190 | { |
59d71989 | 4191 | struct device *dev = rx_ring->dev; |
f33005a6 | 4192 | int size; |
9d5c8243 | 4193 | |
06034649 | 4194 | size = sizeof(struct igb_rx_buffer) * rx_ring->count; |
f33005a6 | 4195 | |
d2bead57 | 4196 | rx_ring->rx_buffer_info = vmalloc(size); |
06034649 | 4197 | if (!rx_ring->rx_buffer_info) |
9d5c8243 | 4198 | goto err; |
9d5c8243 | 4199 | |
9d5c8243 | 4200 | /* Round up to nearest 4K */ |
f33005a6 | 4201 | rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); |
9d5c8243 AK |
4202 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
4203 | ||
5536d210 AD |
4204 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, |
4205 | &rx_ring->dma, GFP_KERNEL); | |
9d5c8243 AK |
4206 | if (!rx_ring->desc) |
4207 | goto err; | |
4208 | ||
cbc8e55f | 4209 | rx_ring->next_to_alloc = 0; |
9d5c8243 AK |
4210 | rx_ring->next_to_clean = 0; |
4211 | rx_ring->next_to_use = 0; | |
9d5c8243 | 4212 | |
9d5c8243 AK |
4213 | return 0; |
4214 | ||
4215 | err: | |
06034649 AD |
4216 | vfree(rx_ring->rx_buffer_info); |
4217 | rx_ring->rx_buffer_info = NULL; | |
f33005a6 | 4218 | dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); |
9d5c8243 AK |
4219 | return -ENOMEM; |
4220 | } | |
4221 | ||
4222 | /** | |
b980ac18 JK |
4223 | * igb_setup_all_rx_resources - wrapper to allocate Rx resources |
4224 | * (Descriptors) for all queues | |
4225 | * @adapter: board private structure | |
9d5c8243 | 4226 | * |
b980ac18 | 4227 | * Return 0 on success, negative on failure |
9d5c8243 AK |
4228 | **/ |
4229 | static int igb_setup_all_rx_resources(struct igb_adapter *adapter) | |
4230 | { | |
439705e1 | 4231 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
4232 | int i, err = 0; |
4233 | ||
4234 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
3025a446 | 4235 | err = igb_setup_rx_resources(adapter->rx_ring[i]); |
9d5c8243 | 4236 | if (err) { |
439705e1 | 4237 | dev_err(&pdev->dev, |
9d5c8243 AK |
4238 | "Allocation for Rx Queue %u failed\n", i); |
4239 | for (i--; i >= 0; i--) | |
3025a446 | 4240 | igb_free_rx_resources(adapter->rx_ring[i]); |
9d5c8243 AK |
4241 | break; |
4242 | } | |
4243 | } | |
4244 | ||
4245 | return err; | |
4246 | } | |
4247 | ||
06cf2666 | 4248 | /** |
b980ac18 JK |
4249 | * igb_setup_mrqc - configure the multiple receive queue control registers |
4250 | * @adapter: Board private structure | |
06cf2666 AD |
4251 | **/ |
4252 | static void igb_setup_mrqc(struct igb_adapter *adapter) | |
4253 | { | |
4254 | struct e1000_hw *hw = &adapter->hw; | |
4255 | u32 mrqc, rxcsum; | |
ed12cc9a | 4256 | u32 j, num_rx_queues; |
eb31f849 | 4257 | u32 rss_key[10]; |
06cf2666 | 4258 | |
eb31f849 | 4259 | netdev_rss_key_fill(rss_key, sizeof(rss_key)); |
a57fe23e | 4260 | for (j = 0; j < 10; j++) |
eb31f849 | 4261 | wr32(E1000_RSSRK(j), rss_key[j]); |
06cf2666 | 4262 | |
a99955fc | 4263 | num_rx_queues = adapter->rss_queues; |
06cf2666 | 4264 | |
797fd4be | 4265 | switch (hw->mac.type) { |
797fd4be AD |
4266 | case e1000_82576: |
4267 | /* 82576 supports 2 RSS queues for SR-IOV */ | |
ed12cc9a | 4268 | if (adapter->vfs_allocated_count) |
06cf2666 | 4269 | num_rx_queues = 2; |
797fd4be AD |
4270 | break; |
4271 | default: | |
4272 | break; | |
06cf2666 AD |
4273 | } |
4274 | ||
ed12cc9a LMV |
4275 | if (adapter->rss_indir_tbl_init != num_rx_queues) { |
4276 | for (j = 0; j < IGB_RETA_SIZE; j++) | |
c502ea2e CW |
4277 | adapter->rss_indir_tbl[j] = |
4278 | (j * num_rx_queues) / IGB_RETA_SIZE; | |
ed12cc9a | 4279 | adapter->rss_indir_tbl_init = num_rx_queues; |
06cf2666 | 4280 | } |
ed12cc9a | 4281 | igb_write_rss_indir_tbl(adapter); |
06cf2666 | 4282 | |
b980ac18 | 4283 | /* Disable raw packet checksumming so that RSS hash is placed in |
06cf2666 AD |
4284 | * descriptor on writeback. No need to enable TCP/UDP/IP checksum |
4285 | * offloads as they are enabled by default | |
4286 | */ | |
4287 | rxcsum = rd32(E1000_RXCSUM); | |
4288 | rxcsum |= E1000_RXCSUM_PCSD; | |
4289 | ||
4290 | if (adapter->hw.mac.type >= e1000_82576) | |
4291 | /* Enable Receive Checksum Offload for SCTP */ | |
4292 | rxcsum |= E1000_RXCSUM_CRCOFL; | |
4293 | ||
4294 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ | |
4295 | wr32(E1000_RXCSUM, rxcsum); | |
f96a8a0b | 4296 | |
039454a8 AA |
4297 | /* Generate RSS hash based on packet types, TCP/UDP |
4298 | * port numbers and/or IPv4/v6 src and dst addresses | |
4299 | */ | |
f96a8a0b CW |
4300 | mrqc = E1000_MRQC_RSS_FIELD_IPV4 | |
4301 | E1000_MRQC_RSS_FIELD_IPV4_TCP | | |
4302 | E1000_MRQC_RSS_FIELD_IPV6 | | |
4303 | E1000_MRQC_RSS_FIELD_IPV6_TCP | | |
4304 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; | |
06cf2666 | 4305 | |
039454a8 AA |
4306 | if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) |
4307 | mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; | |
4308 | if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) | |
4309 | mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; | |
4310 | ||
06cf2666 AD |
4311 | /* If VMDq is enabled then we set the appropriate mode for that, else |
4312 | * we default to RSS so that an RSS hash is calculated per packet even | |
b980ac18 JK |
4313 | * if we are only using one queue |
4314 | */ | |
06cf2666 AD |
4315 | if (adapter->vfs_allocated_count) { |
4316 | if (hw->mac.type > e1000_82575) { | |
4317 | /* Set the default pool for the PF's first queue */ | |
4318 | u32 vtctl = rd32(E1000_VT_CTL); | |
9005df38 | 4319 | |
06cf2666 AD |
4320 | vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | |
4321 | E1000_VT_CTL_DISABLE_DEF_POOL); | |
4322 | vtctl |= adapter->vfs_allocated_count << | |
4323 | E1000_VT_CTL_DEFAULT_POOL_SHIFT; | |
4324 | wr32(E1000_VT_CTL, vtctl); | |
4325 | } | |
a99955fc | 4326 | if (adapter->rss_queues > 1) |
c883de9f | 4327 | mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; |
06cf2666 | 4328 | else |
f96a8a0b | 4329 | mrqc |= E1000_MRQC_ENABLE_VMDQ; |
06cf2666 | 4330 | } else { |
f96a8a0b | 4331 | if (hw->mac.type != e1000_i211) |
c883de9f | 4332 | mrqc |= E1000_MRQC_ENABLE_RSS_MQ; |
06cf2666 AD |
4333 | } |
4334 | igb_vmm_control(adapter); | |
4335 | ||
06cf2666 AD |
4336 | wr32(E1000_MRQC, mrqc); |
4337 | } | |
4338 | ||
9d5c8243 | 4339 | /** |
b980ac18 JK |
4340 | * igb_setup_rctl - configure the receive control registers |
4341 | * @adapter: Board private structure | |
9d5c8243 | 4342 | **/ |
d7ee5b3a | 4343 | void igb_setup_rctl(struct igb_adapter *adapter) |
9d5c8243 AK |
4344 | { |
4345 | struct e1000_hw *hw = &adapter->hw; | |
4346 | u32 rctl; | |
9d5c8243 AK |
4347 | |
4348 | rctl = rd32(E1000_RCTL); | |
4349 | ||
4350 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | |
69d728ba | 4351 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); |
9d5c8243 | 4352 | |
69d728ba | 4353 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | |
28b0759c | 4354 | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); |
9d5c8243 | 4355 | |
b980ac18 | 4356 | /* enable stripping of CRC. It's unlikely this will break BMC |
87cb7e8c AK |
4357 | * redirection as it did with e1000. Newer features require |
4358 | * that the HW strips the CRC. | |
73cd78f1 | 4359 | */ |
87cb7e8c | 4360 | rctl |= E1000_RCTL_SECRC; |
9d5c8243 | 4361 | |
559e9c49 | 4362 | /* disable store bad packets and clear size bits. */ |
ec54d7d6 | 4363 | rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); |
9d5c8243 | 4364 | |
45693bcb | 4365 | /* enable LPE to allow for reception of jumbo frames */ |
6ec43fe6 | 4366 | rctl |= E1000_RCTL_LPE; |
9d5c8243 | 4367 | |
952f72a8 AD |
4368 | /* disable queue 0 to prevent tail write w/o re-config */ |
4369 | wr32(E1000_RXDCTL(0), 0); | |
9d5c8243 | 4370 | |
e1739522 AD |
4371 | /* Attention!!! For SR-IOV PF driver operations you must enable |
4372 | * queue drop for all VF and PF queues to prevent head of line blocking | |
4373 | * if an un-trusted VF does not provide descriptors to hardware. | |
4374 | */ | |
4375 | if (adapter->vfs_allocated_count) { | |
e1739522 AD |
4376 | /* set all queue drop enable bits */ |
4377 | wr32(E1000_QDE, ALL_QUEUES); | |
e1739522 AD |
4378 | } |
4379 | ||
89eaefb6 BG |
4380 | /* This is useful for sniffing bad packets. */ |
4381 | if (adapter->netdev->features & NETIF_F_RXALL) { | |
4382 | /* UPE and MPE will be handled by normal PROMISC logic | |
b980ac18 JK |
4383 | * in e1000e_set_rx_mode |
4384 | */ | |
89eaefb6 BG |
4385 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ |
4386 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ | |
4387 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | |
4388 | ||
16903caa | 4389 | rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ |
89eaefb6 BG |
4390 | E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ |
4391 | /* Do not mess with E1000_CTRL_VME, it affects transmit as well, | |
4392 | * and that breaks VLANs. | |
4393 | */ | |
4394 | } | |
4395 | ||
9d5c8243 AK |
4396 | wr32(E1000_RCTL, rctl); |
4397 | } | |
4398 | ||
7d5753f0 | 4399 | static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, |
9005df38 | 4400 | int vfn) |
7d5753f0 AD |
4401 | { |
4402 | struct e1000_hw *hw = &adapter->hw; | |
4403 | u32 vmolr; | |
4404 | ||
d3836f8e AD |
4405 | if (size > MAX_JUMBO_FRAME_SIZE) |
4406 | size = MAX_JUMBO_FRAME_SIZE; | |
7d5753f0 AD |
4407 | |
4408 | vmolr = rd32(E1000_VMOLR(vfn)); | |
4409 | vmolr &= ~E1000_VMOLR_RLPML_MASK; | |
4410 | vmolr |= size | E1000_VMOLR_LPE; | |
4411 | wr32(E1000_VMOLR(vfn), vmolr); | |
4412 | ||
4413 | return 0; | |
4414 | } | |
4415 | ||
030f9f52 CV |
4416 | static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, |
4417 | int vfn, bool enable) | |
e1739522 | 4418 | { |
e1739522 | 4419 | struct e1000_hw *hw = &adapter->hw; |
030f9f52 | 4420 | u32 val, reg; |
e1739522 | 4421 | |
030f9f52 CV |
4422 | if (hw->mac.type < e1000_82576) |
4423 | return; | |
e1739522 | 4424 | |
030f9f52 CV |
4425 | if (hw->mac.type == e1000_i350) |
4426 | reg = E1000_DVMOLR(vfn); | |
4427 | else | |
4428 | reg = E1000_VMOLR(vfn); | |
4429 | ||
4430 | val = rd32(reg); | |
4431 | if (enable) | |
4432 | val |= E1000_VMOLR_STRVLAN; | |
4433 | else | |
4434 | val &= ~(E1000_VMOLR_STRVLAN); | |
4435 | wr32(reg, val); | |
e1739522 AD |
4436 | } |
4437 | ||
8151d294 WM |
4438 | static inline void igb_set_vmolr(struct igb_adapter *adapter, |
4439 | int vfn, bool aupe) | |
7d5753f0 AD |
4440 | { |
4441 | struct e1000_hw *hw = &adapter->hw; | |
4442 | u32 vmolr; | |
4443 | ||
b980ac18 | 4444 | /* This register exists only on 82576 and newer so if we are older then |
7d5753f0 AD |
4445 | * we should exit and do nothing |
4446 | */ | |
4447 | if (hw->mac.type < e1000_82576) | |
4448 | return; | |
4449 | ||
4450 | vmolr = rd32(E1000_VMOLR(vfn)); | |
8151d294 | 4451 | if (aupe) |
b980ac18 | 4452 | vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ |
8151d294 WM |
4453 | else |
4454 | vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ | |
7d5753f0 AD |
4455 | |
4456 | /* clear all bits that might not be set */ | |
4457 | vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); | |
4458 | ||
a99955fc | 4459 | if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) |
7d5753f0 | 4460 | vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ |
b980ac18 | 4461 | /* for VMDq only allow the VFs and pool 0 to accept broadcast and |
7d5753f0 AD |
4462 | * multicast packets |
4463 | */ | |
4464 | if (vfn <= adapter->vfs_allocated_count) | |
b980ac18 | 4465 | vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ |
7d5753f0 AD |
4466 | |
4467 | wr32(E1000_VMOLR(vfn), vmolr); | |
4468 | } | |
4469 | ||
85b430b4 | 4470 | /** |
b980ac18 JK |
4471 | * igb_configure_rx_ring - Configure a receive ring after Reset |
4472 | * @adapter: board private structure | |
4473 | * @ring: receive ring to be configured | |
85b430b4 | 4474 | * |
b980ac18 | 4475 | * Configure the Rx unit of the MAC after a reset. |
85b430b4 | 4476 | **/ |
d7ee5b3a | 4477 | void igb_configure_rx_ring(struct igb_adapter *adapter, |
b980ac18 | 4478 | struct igb_ring *ring) |
85b430b4 AD |
4479 | { |
4480 | struct e1000_hw *hw = &adapter->hw; | |
7ec0116c | 4481 | union e1000_adv_rx_desc *rx_desc; |
85b430b4 AD |
4482 | u64 rdba = ring->dma; |
4483 | int reg_idx = ring->reg_idx; | |
a74420e0 | 4484 | u32 srrctl = 0, rxdctl = 0; |
85b430b4 AD |
4485 | |
4486 | /* disable the queue */ | |
a74420e0 | 4487 | wr32(E1000_RXDCTL(reg_idx), 0); |
85b430b4 AD |
4488 | |
4489 | /* Set DMA base address registers */ | |
4490 | wr32(E1000_RDBAL(reg_idx), | |
4491 | rdba & 0x00000000ffffffffULL); | |
4492 | wr32(E1000_RDBAH(reg_idx), rdba >> 32); | |
4493 | wr32(E1000_RDLEN(reg_idx), | |
b980ac18 | 4494 | ring->count * sizeof(union e1000_adv_rx_desc)); |
85b430b4 AD |
4495 | |
4496 | /* initialize head and tail */ | |
629823b8 | 4497 | ring->tail = adapter->io_addr + E1000_RDT(reg_idx); |
a74420e0 | 4498 | wr32(E1000_RDH(reg_idx), 0); |
fce99e34 | 4499 | writel(0, ring->tail); |
85b430b4 | 4500 | |
952f72a8 | 4501 | /* set descriptor configuration */ |
44390ca6 | 4502 | srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
8649aaef AD |
4503 | if (ring_uses_large_buffer(ring)) |
4504 | srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; | |
4505 | else | |
4506 | srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; | |
1a1c225b | 4507 | srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; |
06218a8d | 4508 | if (hw->mac.type >= e1000_82580) |
757b77e2 | 4509 | srrctl |= E1000_SRRCTL_TIMESTAMP; |
e6bdb6fe NN |
4510 | /* Only set Drop Enable if we are supporting multiple queues */ |
4511 | if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) | |
4512 | srrctl |= E1000_SRRCTL_DROP_EN; | |
952f72a8 AD |
4513 | |
4514 | wr32(E1000_SRRCTL(reg_idx), srrctl); | |
4515 | ||
7d5753f0 | 4516 | /* set filtering for VMDQ pools */ |
8151d294 | 4517 | igb_set_vmolr(adapter, reg_idx & 0x7, true); |
7d5753f0 | 4518 | |
85b430b4 AD |
4519 | rxdctl |= IGB_RX_PTHRESH; |
4520 | rxdctl |= IGB_RX_HTHRESH << 8; | |
4521 | rxdctl |= IGB_RX_WTHRESH << 16; | |
a74420e0 | 4522 | |
d2bead57 AD |
4523 | /* initialize rx_buffer_info */ |
4524 | memset(ring->rx_buffer_info, 0, | |
4525 | sizeof(struct igb_rx_buffer) * ring->count); | |
4526 | ||
7ec0116c AD |
4527 | /* initialize Rx descriptor 0 */ |
4528 | rx_desc = IGB_RX_DESC(ring, 0); | |
4529 | rx_desc->wb.upper.length = 0; | |
4530 | ||
a74420e0 AD |
4531 | /* enable receive descriptor fetching */ |
4532 | rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; | |
85b430b4 AD |
4533 | wr32(E1000_RXDCTL(reg_idx), rxdctl); |
4534 | } | |
4535 | ||
8649aaef AD |
4536 | static void igb_set_rx_buffer_len(struct igb_adapter *adapter, |
4537 | struct igb_ring *rx_ring) | |
4538 | { | |
4539 | /* set build_skb and buffer size flags */ | |
e3cdf68d | 4540 | clear_ring_build_skb_enabled(rx_ring); |
8649aaef AD |
4541 | clear_ring_uses_large_buffer(rx_ring); |
4542 | ||
4543 | if (adapter->flags & IGB_FLAG_RX_LEGACY) | |
4544 | return; | |
4545 | ||
e3cdf68d AD |
4546 | set_ring_build_skb_enabled(rx_ring); |
4547 | ||
8649aaef AD |
4548 | #if (PAGE_SIZE < 8192) |
4549 | if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) | |
4550 | return; | |
4551 | ||
4552 | set_ring_uses_large_buffer(rx_ring); | |
4553 | #endif | |
4554 | } | |
4555 | ||
9d5c8243 | 4556 | /** |
b980ac18 JK |
4557 | * igb_configure_rx - Configure receive Unit after Reset |
4558 | * @adapter: board private structure | |
9d5c8243 | 4559 | * |
b980ac18 | 4560 | * Configure the Rx unit of the MAC after a reset. |
9d5c8243 AK |
4561 | **/ |
4562 | static void igb_configure_rx(struct igb_adapter *adapter) | |
4563 | { | |
9107584e | 4564 | int i; |
9d5c8243 | 4565 | |
26ad9178 | 4566 | /* set the correct pool for the PF default MAC address in entry 0 */ |
83c21335 | 4567 | igb_set_default_mac_filter(adapter); |
26ad9178 | 4568 | |
06cf2666 | 4569 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
b980ac18 JK |
4570 | * the Base and Length of the Rx Descriptor Ring |
4571 | */ | |
8649aaef AD |
4572 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4573 | struct igb_ring *rx_ring = adapter->rx_ring[i]; | |
4574 | ||
4575 | igb_set_rx_buffer_len(adapter, rx_ring); | |
4576 | igb_configure_rx_ring(adapter, rx_ring); | |
4577 | } | |
9d5c8243 AK |
4578 | } |
4579 | ||
4580 | /** | |
b980ac18 JK |
4581 | * igb_free_tx_resources - Free Tx Resources per Queue |
4582 | * @tx_ring: Tx descriptor ring for a specific queue | |
9d5c8243 | 4583 | * |
b980ac18 | 4584 | * Free all transmit software resources |
9d5c8243 | 4585 | **/ |
68fd9910 | 4586 | void igb_free_tx_resources(struct igb_ring *tx_ring) |
9d5c8243 | 4587 | { |
3b644cf6 | 4588 | igb_clean_tx_ring(tx_ring); |
9d5c8243 | 4589 | |
06034649 AD |
4590 | vfree(tx_ring->tx_buffer_info); |
4591 | tx_ring->tx_buffer_info = NULL; | |
9d5c8243 | 4592 | |
439705e1 AD |
4593 | /* if not set, then don't free */ |
4594 | if (!tx_ring->desc) | |
4595 | return; | |
4596 | ||
59d71989 AD |
4597 | dma_free_coherent(tx_ring->dev, tx_ring->size, |
4598 | tx_ring->desc, tx_ring->dma); | |
9d5c8243 AK |
4599 | |
4600 | tx_ring->desc = NULL; | |
4601 | } | |
4602 | ||
4603 | /** | |
b980ac18 JK |
4604 | * igb_free_all_tx_resources - Free Tx Resources for All Queues |
4605 | * @adapter: board private structure | |
9d5c8243 | 4606 | * |
b980ac18 | 4607 | * Free all transmit software resources |
9d5c8243 AK |
4608 | **/ |
4609 | static void igb_free_all_tx_resources(struct igb_adapter *adapter) | |
4610 | { | |
4611 | int i; | |
4612 | ||
4613 | for (i = 0; i < adapter->num_tx_queues; i++) | |
17a402a0 CW |
4614 | if (adapter->tx_ring[i]) |
4615 | igb_free_tx_resources(adapter->tx_ring[i]); | |
9d5c8243 AK |
4616 | } |
4617 | ||
9d5c8243 | 4618 | /** |
b980ac18 JK |
4619 | * igb_clean_tx_ring - Free Tx Buffers |
4620 | * @tx_ring: ring to be cleaned | |
9d5c8243 | 4621 | **/ |
3b644cf6 | 4622 | static void igb_clean_tx_ring(struct igb_ring *tx_ring) |
9d5c8243 | 4623 | { |
7cc6fd4c AD |
4624 | u16 i = tx_ring->next_to_clean; |
4625 | struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; | |
9d5c8243 | 4626 | |
7cc6fd4c AD |
4627 | while (i != tx_ring->next_to_use) { |
4628 | union e1000_adv_tx_desc *eop_desc, *tx_desc; | |
9d5c8243 | 4629 | |
7cc6fd4c AD |
4630 | /* Free all the Tx ring sk_buffs */ |
4631 | dev_kfree_skb_any(tx_buffer->skb); | |
9d5c8243 | 4632 | |
7cc6fd4c AD |
4633 | /* unmap skb header data */ |
4634 | dma_unmap_single(tx_ring->dev, | |
4635 | dma_unmap_addr(tx_buffer, dma), | |
4636 | dma_unmap_len(tx_buffer, len), | |
4637 | DMA_TO_DEVICE); | |
dad8a3b3 | 4638 | |
7cc6fd4c AD |
4639 | /* check for eop_desc to determine the end of the packet */ |
4640 | eop_desc = tx_buffer->next_to_watch; | |
4641 | tx_desc = IGB_TX_DESC(tx_ring, i); | |
4642 | ||
4643 | /* unmap remaining buffers */ | |
4644 | while (tx_desc != eop_desc) { | |
4645 | tx_buffer++; | |
4646 | tx_desc++; | |
4647 | i++; | |
4648 | if (unlikely(i == tx_ring->count)) { | |
4649 | i = 0; | |
4650 | tx_buffer = tx_ring->tx_buffer_info; | |
4651 | tx_desc = IGB_TX_DESC(tx_ring, 0); | |
4652 | } | |
4653 | ||
4654 | /* unmap any remaining paged data */ | |
4655 | if (dma_unmap_len(tx_buffer, len)) | |
4656 | dma_unmap_page(tx_ring->dev, | |
4657 | dma_unmap_addr(tx_buffer, dma), | |
4658 | dma_unmap_len(tx_buffer, len), | |
4659 | DMA_TO_DEVICE); | |
4660 | } | |
9d5c8243 | 4661 | |
7cc6fd4c AD |
4662 | /* move us one more past the eop_desc for start of next pkt */ |
4663 | tx_buffer++; | |
4664 | i++; | |
4665 | if (unlikely(i == tx_ring->count)) { | |
4666 | i = 0; | |
4667 | tx_buffer = tx_ring->tx_buffer_info; | |
4668 | } | |
4669 | } | |
4670 | ||
4671 | /* reset BQL for queue */ | |
4672 | netdev_tx_reset_queue(txring_txq(tx_ring)); | |
9d5c8243 | 4673 | |
7cc6fd4c | 4674 | /* reset next_to_use and next_to_clean */ |
9d5c8243 AK |
4675 | tx_ring->next_to_use = 0; |
4676 | tx_ring->next_to_clean = 0; | |
9d5c8243 AK |
4677 | } |
4678 | ||
4679 | /** | |
b980ac18 JK |
4680 | * igb_clean_all_tx_rings - Free Tx Buffers for all queues |
4681 | * @adapter: board private structure | |
9d5c8243 AK |
4682 | **/ |
4683 | static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | |
4684 | { | |
4685 | int i; | |
4686 | ||
4687 | for (i = 0; i < adapter->num_tx_queues; i++) | |
17a402a0 CW |
4688 | if (adapter->tx_ring[i]) |
4689 | igb_clean_tx_ring(adapter->tx_ring[i]); | |
9d5c8243 AK |
4690 | } |
4691 | ||
4692 | /** | |
b980ac18 JK |
4693 | * igb_free_rx_resources - Free Rx Resources |
4694 | * @rx_ring: ring to clean the resources from | |
9d5c8243 | 4695 | * |
b980ac18 | 4696 | * Free all receive software resources |
9d5c8243 | 4697 | **/ |
68fd9910 | 4698 | void igb_free_rx_resources(struct igb_ring *rx_ring) |
9d5c8243 | 4699 | { |
3b644cf6 | 4700 | igb_clean_rx_ring(rx_ring); |
9d5c8243 | 4701 | |
06034649 AD |
4702 | vfree(rx_ring->rx_buffer_info); |
4703 | rx_ring->rx_buffer_info = NULL; | |
9d5c8243 | 4704 | |
439705e1 AD |
4705 | /* if not set, then don't free */ |
4706 | if (!rx_ring->desc) | |
4707 | return; | |
4708 | ||
59d71989 AD |
4709 | dma_free_coherent(rx_ring->dev, rx_ring->size, |
4710 | rx_ring->desc, rx_ring->dma); | |
9d5c8243 AK |
4711 | |
4712 | rx_ring->desc = NULL; | |
4713 | } | |
4714 | ||
4715 | /** | |
b980ac18 JK |
4716 | * igb_free_all_rx_resources - Free Rx Resources for All Queues |
4717 | * @adapter: board private structure | |
9d5c8243 | 4718 | * |
b980ac18 | 4719 | * Free all receive software resources |
9d5c8243 AK |
4720 | **/ |
4721 | static void igb_free_all_rx_resources(struct igb_adapter *adapter) | |
4722 | { | |
4723 | int i; | |
4724 | ||
4725 | for (i = 0; i < adapter->num_rx_queues; i++) | |
17a402a0 CW |
4726 | if (adapter->rx_ring[i]) |
4727 | igb_free_rx_resources(adapter->rx_ring[i]); | |
9d5c8243 AK |
4728 | } |
4729 | ||
4730 | /** | |
b980ac18 JK |
4731 | * igb_clean_rx_ring - Free Rx Buffers per Queue |
4732 | * @rx_ring: ring to free buffers from | |
9d5c8243 | 4733 | **/ |
3b644cf6 | 4734 | static void igb_clean_rx_ring(struct igb_ring *rx_ring) |
9d5c8243 | 4735 | { |
d2bead57 | 4736 | u16 i = rx_ring->next_to_clean; |
9d5c8243 | 4737 | |
399e06a5 | 4738 | dev_kfree_skb(rx_ring->skb); |
1a1c225b AD |
4739 | rx_ring->skb = NULL; |
4740 | ||
9d5c8243 | 4741 | /* Free all the Rx ring sk_buffs */ |
d2bead57 | 4742 | while (i != rx_ring->next_to_alloc) { |
06034649 | 4743 | struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; |
9d5c8243 | 4744 | |
5be59554 AD |
4745 | /* Invalidate cache lines that may have been written to by |
4746 | * device so that we avoid corrupting memory. | |
4747 | */ | |
4748 | dma_sync_single_range_for_cpu(rx_ring->dev, | |
4749 | buffer_info->dma, | |
4750 | buffer_info->page_offset, | |
8649aaef | 4751 | igb_rx_bufsz(rx_ring), |
5be59554 AD |
4752 | DMA_FROM_DEVICE); |
4753 | ||
4754 | /* free resources associated with mapping */ | |
4755 | dma_unmap_page_attrs(rx_ring->dev, | |
4756 | buffer_info->dma, | |
8649aaef | 4757 | igb_rx_pg_size(rx_ring), |
5be59554 | 4758 | DMA_FROM_DEVICE, |
7bd17592 | 4759 | IGB_RX_DMA_ATTR); |
2976db80 AD |
4760 | __page_frag_cache_drain(buffer_info->page, |
4761 | buffer_info->pagecnt_bias); | |
cbc8e55f | 4762 | |
d2bead57 AD |
4763 | i++; |
4764 | if (i == rx_ring->count) | |
4765 | i = 0; | |
9d5c8243 AK |
4766 | } |
4767 | ||
cbc8e55f | 4768 | rx_ring->next_to_alloc = 0; |
9d5c8243 AK |
4769 | rx_ring->next_to_clean = 0; |
4770 | rx_ring->next_to_use = 0; | |
9d5c8243 AK |
4771 | } |
4772 | ||
4773 | /** | |
b980ac18 JK |
4774 | * igb_clean_all_rx_rings - Free Rx Buffers for all queues |
4775 | * @adapter: board private structure | |
9d5c8243 AK |
4776 | **/ |
4777 | static void igb_clean_all_rx_rings(struct igb_adapter *adapter) | |
4778 | { | |
4779 | int i; | |
4780 | ||
4781 | for (i = 0; i < adapter->num_rx_queues; i++) | |
17a402a0 CW |
4782 | if (adapter->rx_ring[i]) |
4783 | igb_clean_rx_ring(adapter->rx_ring[i]); | |
9d5c8243 AK |
4784 | } |
4785 | ||
4786 | /** | |
b980ac18 JK |
4787 | * igb_set_mac - Change the Ethernet Address of the NIC |
4788 | * @netdev: network interface device structure | |
4789 | * @p: pointer to an address structure | |
9d5c8243 | 4790 | * |
b980ac18 | 4791 | * Returns 0 on success, negative on failure |
9d5c8243 AK |
4792 | **/ |
4793 | static int igb_set_mac(struct net_device *netdev, void *p) | |
4794 | { | |
4795 | struct igb_adapter *adapter = netdev_priv(netdev); | |
28b0759c | 4796 | struct e1000_hw *hw = &adapter->hw; |
9d5c8243 AK |
4797 | struct sockaddr *addr = p; |
4798 | ||
4799 | if (!is_valid_ether_addr(addr->sa_data)) | |
4800 | return -EADDRNOTAVAIL; | |
4801 | ||
4802 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
28b0759c | 4803 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
9d5c8243 | 4804 | |
26ad9178 | 4805 | /* set the correct pool for the new PF MAC address in entry 0 */ |
83c21335 | 4806 | igb_set_default_mac_filter(adapter); |
e1739522 | 4807 | |
9d5c8243 AK |
4808 | return 0; |
4809 | } | |
4810 | ||
4811 | /** | |
b980ac18 JK |
4812 | * igb_write_mc_addr_list - write multicast addresses to MTA |
4813 | * @netdev: network interface device structure | |
9d5c8243 | 4814 | * |
b980ac18 JK |
4815 | * Writes multicast address list to the MTA hash table. |
4816 | * Returns: -ENOMEM on failure | |
4817 | * 0 on no addresses written | |
4818 | * X on writing X addresses to MTA | |
9d5c8243 | 4819 | **/ |
68d480c4 | 4820 | static int igb_write_mc_addr_list(struct net_device *netdev) |
9d5c8243 AK |
4821 | { |
4822 | struct igb_adapter *adapter = netdev_priv(netdev); | |
4823 | struct e1000_hw *hw = &adapter->hw; | |
22bedad3 | 4824 | struct netdev_hw_addr *ha; |
68d480c4 | 4825 | u8 *mta_list; |
9d5c8243 AK |
4826 | int i; |
4827 | ||
4cd24eaf | 4828 | if (netdev_mc_empty(netdev)) { |
68d480c4 AD |
4829 | /* nothing to program, so clear mc list */ |
4830 | igb_update_mc_addr_list(hw, NULL, 0); | |
4831 | igb_restore_vf_multicasts(adapter); | |
4832 | return 0; | |
4833 | } | |
9d5c8243 | 4834 | |
6396bb22 | 4835 | mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); |
68d480c4 AD |
4836 | if (!mta_list) |
4837 | return -ENOMEM; | |
ff41f8dc | 4838 | |
68d480c4 | 4839 | /* The shared function expects a packed array of only addresses. */ |
48e2f183 | 4840 | i = 0; |
22bedad3 JP |
4841 | netdev_for_each_mc_addr(ha, netdev) |
4842 | memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); | |
68d480c4 | 4843 | |
68d480c4 AD |
4844 | igb_update_mc_addr_list(hw, mta_list, i); |
4845 | kfree(mta_list); | |
4846 | ||
4cd24eaf | 4847 | return netdev_mc_count(netdev); |
68d480c4 AD |
4848 | } |
4849 | ||
16903caa AD |
4850 | static int igb_vlan_promisc_enable(struct igb_adapter *adapter) |
4851 | { | |
4852 | struct e1000_hw *hw = &adapter->hw; | |
4853 | u32 i, pf_id; | |
4854 | ||
4855 | switch (hw->mac.type) { | |
4856 | case e1000_i210: | |
4857 | case e1000_i211: | |
4858 | case e1000_i350: | |
4859 | /* VLAN filtering needed for VLAN prio filter */ | |
4860 | if (adapter->netdev->features & NETIF_F_NTUPLE) | |
4861 | break; | |
4862 | /* fall through */ | |
4863 | case e1000_82576: | |
4864 | case e1000_82580: | |
4865 | case e1000_i354: | |
4866 | /* VLAN filtering needed for pool filtering */ | |
4867 | if (adapter->vfs_allocated_count) | |
4868 | break; | |
4869 | /* fall through */ | |
4870 | default: | |
4871 | return 1; | |
4872 | } | |
4873 | ||
4874 | /* We are already in VLAN promisc, nothing to do */ | |
4875 | if (adapter->flags & IGB_FLAG_VLAN_PROMISC) | |
4876 | return 0; | |
4877 | ||
4878 | if (!adapter->vfs_allocated_count) | |
4879 | goto set_vfta; | |
4880 | ||
4881 | /* Add PF to all active pools */ | |
4882 | pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; | |
4883 | ||
4884 | for (i = E1000_VLVF_ARRAY_SIZE; --i;) { | |
4885 | u32 vlvf = rd32(E1000_VLVF(i)); | |
4886 | ||
a51d8c21 | 4887 | vlvf |= BIT(pf_id); |
16903caa AD |
4888 | wr32(E1000_VLVF(i), vlvf); |
4889 | } | |
4890 | ||
4891 | set_vfta: | |
4892 | /* Set all bits in the VLAN filter table array */ | |
4893 | for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) | |
4894 | hw->mac.ops.write_vfta(hw, i, ~0U); | |
4895 | ||
4896 | /* Set flag so we don't redo unnecessary work */ | |
4897 | adapter->flags |= IGB_FLAG_VLAN_PROMISC; | |
4898 | ||
4899 | return 0; | |
4900 | } | |
4901 | ||
4902 | #define VFTA_BLOCK_SIZE 8 | |
4903 | static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) | |
4904 | { | |
4905 | struct e1000_hw *hw = &adapter->hw; | |
4906 | u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; | |
4907 | u32 vid_start = vfta_offset * 32; | |
4908 | u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); | |
4909 | u32 i, vid, word, bits, pf_id; | |
4910 | ||
4911 | /* guarantee that we don't scrub out management VLAN */ | |
4912 | vid = adapter->mng_vlan_id; | |
4913 | if (vid >= vid_start && vid < vid_end) | |
a51d8c21 | 4914 | vfta[(vid - vid_start) / 32] |= BIT(vid % 32); |
16903caa AD |
4915 | |
4916 | if (!adapter->vfs_allocated_count) | |
4917 | goto set_vfta; | |
4918 | ||
4919 | pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; | |
4920 | ||
4921 | for (i = E1000_VLVF_ARRAY_SIZE; --i;) { | |
4922 | u32 vlvf = rd32(E1000_VLVF(i)); | |
4923 | ||
4924 | /* pull VLAN ID from VLVF */ | |
4925 | vid = vlvf & VLAN_VID_MASK; | |
4926 | ||
4927 | /* only concern ourselves with a certain range */ | |
4928 | if (vid < vid_start || vid >= vid_end) | |
4929 | continue; | |
4930 | ||
4931 | if (vlvf & E1000_VLVF_VLANID_ENABLE) { | |
4932 | /* record VLAN ID in VFTA */ | |
a51d8c21 | 4933 | vfta[(vid - vid_start) / 32] |= BIT(vid % 32); |
16903caa AD |
4934 | |
4935 | /* if PF is part of this then continue */ | |
4936 | if (test_bit(vid, adapter->active_vlans)) | |
4937 | continue; | |
4938 | } | |
4939 | ||
4940 | /* remove PF from the pool */ | |
a51d8c21 | 4941 | bits = ~BIT(pf_id); |
16903caa AD |
4942 | bits &= rd32(E1000_VLVF(i)); |
4943 | wr32(E1000_VLVF(i), bits); | |
4944 | } | |
4945 | ||
4946 | set_vfta: | |
4947 | /* extract values from active_vlans and write back to VFTA */ | |
4948 | for (i = VFTA_BLOCK_SIZE; i--;) { | |
4949 | vid = (vfta_offset + i) * 32; | |
4950 | word = vid / BITS_PER_LONG; | |
4951 | bits = vid % BITS_PER_LONG; | |
4952 | ||
4953 | vfta[i] |= adapter->active_vlans[word] >> bits; | |
4954 | ||
4955 | hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); | |
4956 | } | |
4957 | } | |
4958 | ||
4959 | static void igb_vlan_promisc_disable(struct igb_adapter *adapter) | |
4960 | { | |
4961 | u32 i; | |
4962 | ||
4963 | /* We are not in VLAN promisc, nothing to do */ | |
4964 | if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) | |
4965 | return; | |
4966 | ||
4967 | /* Set flag so we don't redo unnecessary work */ | |
4968 | adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; | |
4969 | ||
4970 | for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) | |
4971 | igb_scrub_vfta(adapter, i); | |
4972 | } | |
4973 | ||
68d480c4 | 4974 | /** |
b980ac18 JK |
4975 | * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set |
4976 | * @netdev: network interface device structure | |
68d480c4 | 4977 | * |
b980ac18 JK |
4978 | * The set_rx_mode entry point is called whenever the unicast or multicast |
4979 | * address lists or the network interface flags are updated. This routine is | |
4980 | * responsible for configuring the hardware for proper unicast, multicast, | |
4981 | * promiscuous mode, and all-multi behavior. | |
68d480c4 AD |
4982 | **/ |
4983 | static void igb_set_rx_mode(struct net_device *netdev) | |
4984 | { | |
4985 | struct igb_adapter *adapter = netdev_priv(netdev); | |
4986 | struct e1000_hw *hw = &adapter->hw; | |
4987 | unsigned int vfn = adapter->vfs_allocated_count; | |
cfbc871c | 4988 | u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; |
68d480c4 AD |
4989 | int count; |
4990 | ||
4991 | /* Check for Promiscuous and All Multicast modes */ | |
68d480c4 | 4992 | if (netdev->flags & IFF_PROMISC) { |
16903caa | 4993 | rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; |
bf456abb AD |
4994 | vmolr |= E1000_VMOLR_MPME; |
4995 | ||
4996 | /* enable use of UTA filter to force packets to default pool */ | |
4997 | if (hw->mac.type == e1000_82576) | |
4998 | vmolr |= E1000_VMOLR_ROPE; | |
68d480c4 AD |
4999 | } else { |
5000 | if (netdev->flags & IFF_ALLMULTI) { | |
5001 | rctl |= E1000_RCTL_MPE; | |
5002 | vmolr |= E1000_VMOLR_MPME; | |
5003 | } else { | |
b980ac18 | 5004 | /* Write addresses to the MTA, if the attempt fails |
25985edc | 5005 | * then we should just turn on promiscuous mode so |
68d480c4 AD |
5006 | * that we can at least receive multicast traffic |
5007 | */ | |
5008 | count = igb_write_mc_addr_list(netdev); | |
5009 | if (count < 0) { | |
5010 | rctl |= E1000_RCTL_MPE; | |
5011 | vmolr |= E1000_VMOLR_MPME; | |
5012 | } else if (count) { | |
5013 | vmolr |= E1000_VMOLR_ROMPE; | |
5014 | } | |
5015 | } | |
28fc06f5 | 5016 | } |
268f9d33 AD |
5017 | |
5018 | /* Write addresses to available RAR registers, if there is not | |
5019 | * sufficient space to store all the addresses then enable | |
5020 | * unicast promiscuous mode | |
5021 | */ | |
83c21335 | 5022 | if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { |
268f9d33 AD |
5023 | rctl |= E1000_RCTL_UPE; |
5024 | vmolr |= E1000_VMOLR_ROPE; | |
28fc06f5 | 5025 | } |
16903caa AD |
5026 | |
5027 | /* enable VLAN filtering by default */ | |
5028 | rctl |= E1000_RCTL_VFE; | |
5029 | ||
5030 | /* disable VLAN filtering for modes that require it */ | |
5031 | if ((netdev->flags & IFF_PROMISC) || | |
5032 | (netdev->features & NETIF_F_RXALL)) { | |
5033 | /* if we fail to set all rules then just clear VFE */ | |
5034 | if (igb_vlan_promisc_enable(adapter)) | |
5035 | rctl &= ~E1000_RCTL_VFE; | |
5036 | } else { | |
5037 | igb_vlan_promisc_disable(adapter); | |
5038 | } | |
5039 | ||
5040 | /* update state of unicast, multicast, and VLAN filtering modes */ | |
5041 | rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | | |
5042 | E1000_RCTL_VFE); | |
68d480c4 | 5043 | wr32(E1000_RCTL, rctl); |
28fc06f5 | 5044 | |
cfbc871c AD |
5045 | #if (PAGE_SIZE < 8192) |
5046 | if (!adapter->vfs_allocated_count) { | |
5047 | if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) | |
5048 | rlpml = IGB_MAX_FRAME_BUILD_SKB; | |
5049 | } | |
5050 | #endif | |
5051 | wr32(E1000_RLPML, rlpml); | |
5052 | ||
b980ac18 | 5053 | /* In order to support SR-IOV and eventually VMDq it is necessary to set |
68d480c4 AD |
5054 | * the VMOLR to enable the appropriate modes. Without this workaround |
5055 | * we will have issues with VLAN tag stripping not being done for frames | |
5056 | * that are only arriving because we are the default pool | |
5057 | */ | |
f96a8a0b | 5058 | if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) |
28fc06f5 | 5059 | return; |
9d5c8243 | 5060 | |
bf456abb AD |
5061 | /* set UTA to appropriate mode */ |
5062 | igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); | |
5063 | ||
68d480c4 | 5064 | vmolr |= rd32(E1000_VMOLR(vfn)) & |
b980ac18 | 5065 | ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); |
45693bcb | 5066 | |
cfbc871c | 5067 | /* enable Rx jumbo frames, restrict as needed to support build_skb */ |
45693bcb | 5068 | vmolr &= ~E1000_VMOLR_RLPML_MASK; |
cfbc871c AD |
5069 | #if (PAGE_SIZE < 8192) |
5070 | if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) | |
5071 | vmolr |= IGB_MAX_FRAME_BUILD_SKB; | |
5072 | else | |
5073 | #endif | |
5074 | vmolr |= MAX_JUMBO_FRAME_SIZE; | |
5075 | vmolr |= E1000_VMOLR_LPE; | |
45693bcb | 5076 | |
68d480c4 | 5077 | wr32(E1000_VMOLR(vfn), vmolr); |
45693bcb | 5078 | |
28fc06f5 | 5079 | igb_restore_vf_multicasts(adapter); |
9d5c8243 AK |
5080 | } |
5081 | ||
13800469 GR |
5082 | static void igb_check_wvbr(struct igb_adapter *adapter) |
5083 | { | |
5084 | struct e1000_hw *hw = &adapter->hw; | |
5085 | u32 wvbr = 0; | |
5086 | ||
5087 | switch (hw->mac.type) { | |
5088 | case e1000_82576: | |
5089 | case e1000_i350: | |
81ad807b CW |
5090 | wvbr = rd32(E1000_WVBR); |
5091 | if (!wvbr) | |
13800469 GR |
5092 | return; |
5093 | break; | |
5094 | default: | |
5095 | break; | |
5096 | } | |
5097 | ||
5098 | adapter->wvbr |= wvbr; | |
5099 | } | |
5100 | ||
5101 | #define IGB_STAGGERED_QUEUE_OFFSET 8 | |
5102 | ||
5103 | static void igb_spoof_check(struct igb_adapter *adapter) | |
5104 | { | |
5105 | int j; | |
5106 | ||
5107 | if (!adapter->wvbr) | |
5108 | return; | |
5109 | ||
9005df38 | 5110 | for (j = 0; j < adapter->vfs_allocated_count; j++) { |
a51d8c21 JK |
5111 | if (adapter->wvbr & BIT(j) || |
5112 | adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { | |
13800469 GR |
5113 | dev_warn(&adapter->pdev->dev, |
5114 | "Spoof event(s) detected on VF %d\n", j); | |
5115 | adapter->wvbr &= | |
a51d8c21 JK |
5116 | ~(BIT(j) | |
5117 | BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); | |
13800469 GR |
5118 | } |
5119 | } | |
5120 | } | |
5121 | ||
9d5c8243 | 5122 | /* Need to wait a few seconds after link up to get diagnostic information from |
b980ac18 JK |
5123 | * the phy |
5124 | */ | |
26566eae | 5125 | static void igb_update_phy_info(struct timer_list *t) |
9d5c8243 | 5126 | { |
26566eae | 5127 | struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); |
f5f4cf08 | 5128 | igb_get_phy_info(&adapter->hw); |
9d5c8243 AK |
5129 | } |
5130 | ||
4d6b725e | 5131 | /** |
b980ac18 JK |
5132 | * igb_has_link - check shared code for link and determine up/down |
5133 | * @adapter: pointer to driver private info | |
4d6b725e | 5134 | **/ |
3145535a | 5135 | bool igb_has_link(struct igb_adapter *adapter) |
4d6b725e AD |
5136 | { |
5137 | struct e1000_hw *hw = &adapter->hw; | |
5138 | bool link_active = false; | |
4d6b725e AD |
5139 | |
5140 | /* get_link_status is set on LSC (link status) interrupt or | |
5141 | * rx sequence error interrupt. get_link_status will stay | |
5142 | * false until the e1000_check_for_link establishes link | |
5143 | * for copper adapters ONLY | |
5144 | */ | |
5145 | switch (hw->phy.media_type) { | |
5146 | case e1000_media_type_copper: | |
e5c3370f AA |
5147 | if (!hw->mac.get_link_status) |
5148 | return true; | |
7e9660ff | 5149 | /* fall through */ |
4d6b725e | 5150 | case e1000_media_type_internal_serdes: |
e5c3370f AA |
5151 | hw->mac.ops.check_for_link(hw); |
5152 | link_active = !hw->mac.get_link_status; | |
4d6b725e AD |
5153 | break; |
5154 | default: | |
5155 | case e1000_media_type_unknown: | |
5156 | break; | |
5157 | } | |
5158 | ||
aa9b8cc4 AA |
5159 | if (((hw->mac.type == e1000_i210) || |
5160 | (hw->mac.type == e1000_i211)) && | |
5161 | (hw->phy.id == I210_I_PHY_ID)) { | |
5162 | if (!netif_carrier_ok(adapter->netdev)) { | |
5163 | adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; | |
5164 | } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { | |
5165 | adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; | |
5166 | adapter->link_check_timeout = jiffies; | |
5167 | } | |
5168 | } | |
5169 | ||
4d6b725e AD |
5170 | return link_active; |
5171 | } | |
5172 | ||
563988dc SA |
5173 | static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) |
5174 | { | |
5175 | bool ret = false; | |
5176 | u32 ctrl_ext, thstat; | |
5177 | ||
f96a8a0b | 5178 | /* check for thermal sensor event on i350 copper only */ |
563988dc SA |
5179 | if (hw->mac.type == e1000_i350) { |
5180 | thstat = rd32(E1000_THSTAT); | |
5181 | ctrl_ext = rd32(E1000_CTRL_EXT); | |
5182 | ||
5183 | if ((hw->phy.media_type == e1000_media_type_copper) && | |
5c17a203 | 5184 | !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) |
563988dc | 5185 | ret = !!(thstat & event); |
563988dc SA |
5186 | } |
5187 | ||
5188 | return ret; | |
5189 | } | |
5190 | ||
1516f0a6 CW |
5191 | /** |
5192 | * igb_check_lvmmc - check for malformed packets received | |
5193 | * and indicated in LVMMC register | |
5194 | * @adapter: pointer to adapter | |
5195 | **/ | |
5196 | static void igb_check_lvmmc(struct igb_adapter *adapter) | |
5197 | { | |
5198 | struct e1000_hw *hw = &adapter->hw; | |
5199 | u32 lvmmc; | |
5200 | ||
5201 | lvmmc = rd32(E1000_LVMMC); | |
5202 | if (lvmmc) { | |
5203 | if (unlikely(net_ratelimit())) { | |
5204 | netdev_warn(adapter->netdev, | |
5205 | "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", | |
5206 | lvmmc); | |
5207 | } | |
5208 | } | |
5209 | } | |
5210 | ||
9d5c8243 | 5211 | /** |
b980ac18 JK |
5212 | * igb_watchdog - Timer Call-back |
5213 | * @data: pointer to adapter cast into an unsigned long | |
9d5c8243 | 5214 | **/ |
26566eae | 5215 | static void igb_watchdog(struct timer_list *t) |
9d5c8243 | 5216 | { |
26566eae | 5217 | struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); |
9d5c8243 AK |
5218 | /* Do the rest outside of interrupt context */ |
5219 | schedule_work(&adapter->watchdog_task); | |
5220 | } | |
5221 | ||
5222 | static void igb_watchdog_task(struct work_struct *work) | |
5223 | { | |
5224 | struct igb_adapter *adapter = container_of(work, | |
b980ac18 JK |
5225 | struct igb_adapter, |
5226 | watchdog_task); | |
9d5c8243 | 5227 | struct e1000_hw *hw = &adapter->hw; |
c0ba4778 | 5228 | struct e1000_phy_info *phy = &hw->phy; |
9d5c8243 | 5229 | struct net_device *netdev = adapter->netdev; |
563988dc | 5230 | u32 link; |
7a6ea550 | 5231 | int i; |
56cec249 | 5232 | u32 connsw; |
b72f3f72 | 5233 | u16 phy_data, retry_count = 20; |
9d5c8243 | 5234 | |
4d6b725e | 5235 | link = igb_has_link(adapter); |
aa9b8cc4 AA |
5236 | |
5237 | if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { | |
5238 | if (time_after(jiffies, (adapter->link_check_timeout + HZ))) | |
5239 | adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; | |
5240 | else | |
5241 | link = false; | |
5242 | } | |
5243 | ||
56cec249 CW |
5244 | /* Force link down if we have fiber to swap to */ |
5245 | if (adapter->flags & IGB_FLAG_MAS_ENABLE) { | |
5246 | if (hw->phy.media_type == e1000_media_type_copper) { | |
5247 | connsw = rd32(E1000_CONNSW); | |
5248 | if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) | |
5249 | link = 0; | |
5250 | } | |
5251 | } | |
9d5c8243 | 5252 | if (link) { |
2bdfc4e2 CW |
5253 | /* Perform a reset if the media type changed. */ |
5254 | if (hw->dev_spec._82575.media_changed) { | |
5255 | hw->dev_spec._82575.media_changed = false; | |
5256 | adapter->flags |= IGB_FLAG_MEDIA_RESET; | |
5257 | igb_reset(adapter); | |
5258 | } | |
749ab2cd YZ |
5259 | /* Cancel scheduled suspend requests. */ |
5260 | pm_runtime_resume(netdev->dev.parent); | |
5261 | ||
9d5c8243 AK |
5262 | if (!netif_carrier_ok(netdev)) { |
5263 | u32 ctrl; | |
9005df38 | 5264 | |
330a6d6a | 5265 | hw->mac.ops.get_speed_and_duplex(hw, |
b980ac18 JK |
5266 | &adapter->link_speed, |
5267 | &adapter->link_duplex); | |
9d5c8243 AK |
5268 | |
5269 | ctrl = rd32(E1000_CTRL); | |
527d47c1 | 5270 | /* Links status message must follow this format */ |
c75c4edf CW |
5271 | netdev_info(netdev, |
5272 | "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", | |
559e9c49 AD |
5273 | netdev->name, |
5274 | adapter->link_speed, | |
5275 | adapter->link_duplex == FULL_DUPLEX ? | |
876d2d6f JK |
5276 | "Full" : "Half", |
5277 | (ctrl & E1000_CTRL_TFCE) && | |
5278 | (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : | |
5279 | (ctrl & E1000_CTRL_RFCE) ? "RX" : | |
5280 | (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); | |
9d5c8243 | 5281 | |
f4c01e96 CW |
5282 | /* disable EEE if enabled */ |
5283 | if ((adapter->flags & IGB_FLAG_EEE) && | |
5284 | (adapter->link_duplex == HALF_DUPLEX)) { | |
5285 | dev_info(&adapter->pdev->dev, | |
5286 | "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); | |
5287 | adapter->hw.dev_spec._82575.eee_disable = true; | |
5288 | adapter->flags &= ~IGB_FLAG_EEE; | |
5289 | } | |
5290 | ||
c0ba4778 KS |
5291 | /* check if SmartSpeed worked */ |
5292 | igb_check_downshift(hw); | |
5293 | if (phy->speed_downgraded) | |
5294 | netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); | |
5295 | ||
563988dc | 5296 | /* check for thermal sensor event */ |
876d2d6f | 5297 | if (igb_thermal_sensor_event(hw, |
d34a15ab | 5298 | E1000_THSTAT_LINK_THROTTLE)) |
c75c4edf | 5299 | netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); |
563988dc | 5300 | |
d07f3e37 | 5301 | /* adjust timeout factor according to speed/duplex */ |
9d5c8243 AK |
5302 | adapter->tx_timeout_factor = 1; |
5303 | switch (adapter->link_speed) { | |
5304 | case SPEED_10: | |
9d5c8243 AK |
5305 | adapter->tx_timeout_factor = 14; |
5306 | break; | |
5307 | case SPEED_100: | |
9d5c8243 AK |
5308 | /* maybe add some timeout factor ? */ |
5309 | break; | |
5310 | } | |
5311 | ||
b72f3f72 TU |
5312 | if (adapter->link_speed != SPEED_1000) |
5313 | goto no_wait; | |
5314 | ||
5315 | /* wait for Remote receiver status OK */ | |
5316 | retry_read_status: | |
5317 | if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, | |
5318 | &phy_data)) { | |
5319 | if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && | |
5320 | retry_count) { | |
5321 | msleep(100); | |
5322 | retry_count--; | |
5323 | goto retry_read_status; | |
5324 | } else if (!retry_count) { | |
5325 | dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); | |
5326 | } | |
5327 | } else { | |
5328 | dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); | |
5329 | } | |
5330 | no_wait: | |
9d5c8243 | 5331 | netif_carrier_on(netdev); |
9d5c8243 | 5332 | |
4ae196df | 5333 | igb_ping_all_vfs(adapter); |
17dc566c | 5334 | igb_check_vf_rate_limit(adapter); |
4ae196df | 5335 | |
4b1a9877 | 5336 | /* link state has changed, schedule phy info update */ |
9d5c8243 AK |
5337 | if (!test_bit(__IGB_DOWN, &adapter->state)) |
5338 | mod_timer(&adapter->phy_info_timer, | |
5339 | round_jiffies(jiffies + 2 * HZ)); | |
5340 | } | |
5341 | } else { | |
5342 | if (netif_carrier_ok(netdev)) { | |
5343 | adapter->link_speed = 0; | |
5344 | adapter->link_duplex = 0; | |
563988dc SA |
5345 | |
5346 | /* check for thermal sensor event */ | |
876d2d6f JK |
5347 | if (igb_thermal_sensor_event(hw, |
5348 | E1000_THSTAT_PWR_DOWN)) { | |
c75c4edf | 5349 | netdev_err(netdev, "The network adapter was stopped because it overheated\n"); |
7ef5ed1c | 5350 | } |
563988dc | 5351 | |
527d47c1 | 5352 | /* Links status message must follow this format */ |
c75c4edf | 5353 | netdev_info(netdev, "igb: %s NIC Link is Down\n", |
527d47c1 | 5354 | netdev->name); |
9d5c8243 | 5355 | netif_carrier_off(netdev); |
4b1a9877 | 5356 | |
4ae196df AD |
5357 | igb_ping_all_vfs(adapter); |
5358 | ||
4b1a9877 | 5359 | /* link state has changed, schedule phy info update */ |
9d5c8243 AK |
5360 | if (!test_bit(__IGB_DOWN, &adapter->state)) |
5361 | mod_timer(&adapter->phy_info_timer, | |
5362 | round_jiffies(jiffies + 2 * HZ)); | |
749ab2cd | 5363 | |
56cec249 CW |
5364 | /* link is down, time to check for alternate media */ |
5365 | if (adapter->flags & IGB_FLAG_MAS_ENABLE) { | |
5366 | igb_check_swap_media(adapter); | |
5367 | if (adapter->flags & IGB_FLAG_MEDIA_RESET) { | |
5368 | schedule_work(&adapter->reset_task); | |
5369 | /* return immediately */ | |
5370 | return; | |
5371 | } | |
5372 | } | |
749ab2cd YZ |
5373 | pm_schedule_suspend(netdev->dev.parent, |
5374 | MSEC_PER_SEC * 5); | |
56cec249 CW |
5375 | |
5376 | /* also check for alternate media here */ | |
5377 | } else if (!netif_carrier_ok(netdev) && | |
5378 | (adapter->flags & IGB_FLAG_MAS_ENABLE)) { | |
5379 | igb_check_swap_media(adapter); | |
5380 | if (adapter->flags & IGB_FLAG_MEDIA_RESET) { | |
5381 | schedule_work(&adapter->reset_task); | |
5382 | /* return immediately */ | |
5383 | return; | |
5384 | } | |
9d5c8243 AK |
5385 | } |
5386 | } | |
5387 | ||
5642e27b | 5388 | spin_lock(&adapter->stats64_lock); |
81e3f64a | 5389 | igb_update_stats(adapter); |
5642e27b | 5390 | spin_unlock(&adapter->stats64_lock); |
9d5c8243 | 5391 | |
dbabb065 | 5392 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3025a446 | 5393 | struct igb_ring *tx_ring = adapter->tx_ring[i]; |
dbabb065 | 5394 | if (!netif_carrier_ok(netdev)) { |
9d5c8243 AK |
5395 | /* We've lost link, so the controller stops DMA, |
5396 | * but we've got queued Tx work that's never going | |
5397 | * to get done, so reset controller to flush Tx. | |
b980ac18 JK |
5398 | * (Do the reset outside of interrupt context). |
5399 | */ | |
dbabb065 AD |
5400 | if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { |
5401 | adapter->tx_timeout_count++; | |
5402 | schedule_work(&adapter->reset_task); | |
5403 | /* return immediately since reset is imminent */ | |
5404 | return; | |
5405 | } | |
9d5c8243 | 5406 | } |
9d5c8243 | 5407 | |
dbabb065 | 5408 | /* Force detection of hung controller every watchdog period */ |
6d095fa8 | 5409 | set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); |
dbabb065 | 5410 | } |
f7ba205e | 5411 | |
b980ac18 | 5412 | /* Cause software interrupt to ensure Rx ring is cleaned */ |
cd14ef54 | 5413 | if (adapter->flags & IGB_FLAG_HAS_MSIX) { |
047e0030 | 5414 | u32 eics = 0; |
9005df38 | 5415 | |
0d1ae7f4 AD |
5416 | for (i = 0; i < adapter->num_q_vectors; i++) |
5417 | eics |= adapter->q_vector[i]->eims_value; | |
7a6ea550 AD |
5418 | wr32(E1000_EICS, eics); |
5419 | } else { | |
5420 | wr32(E1000_ICS, E1000_ICS_RXDMT0); | |
5421 | } | |
9d5c8243 | 5422 | |
13800469 | 5423 | igb_spoof_check(adapter); |
fc580751 | 5424 | igb_ptp_rx_hang(adapter); |
e5f36ad1 | 5425 | igb_ptp_tx_hang(adapter); |
13800469 | 5426 | |
1516f0a6 CW |
5427 | /* Check LVMMC register on i350/i354 only */ |
5428 | if ((adapter->hw.mac.type == e1000_i350) || | |
5429 | (adapter->hw.mac.type == e1000_i354)) | |
5430 | igb_check_lvmmc(adapter); | |
5431 | ||
9d5c8243 | 5432 | /* Reset the timer */ |
aa9b8cc4 AA |
5433 | if (!test_bit(__IGB_DOWN, &adapter->state)) { |
5434 | if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) | |
5435 | mod_timer(&adapter->watchdog_timer, | |
5436 | round_jiffies(jiffies + HZ)); | |
5437 | else | |
5438 | mod_timer(&adapter->watchdog_timer, | |
5439 | round_jiffies(jiffies + 2 * HZ)); | |
5440 | } | |
9d5c8243 AK |
5441 | } |
5442 | ||
5443 | enum latency_range { | |
5444 | lowest_latency = 0, | |
5445 | low_latency = 1, | |
5446 | bulk_latency = 2, | |
5447 | latency_invalid = 255 | |
5448 | }; | |
5449 | ||
6eb5a7f1 | 5450 | /** |
b980ac18 JK |
5451 | * igb_update_ring_itr - update the dynamic ITR value based on packet size |
5452 | * @q_vector: pointer to q_vector | |
6eb5a7f1 | 5453 | * |
b980ac18 JK |
5454 | * Stores a new ITR value based on strictly on packet size. This |
5455 | * algorithm is less sophisticated than that used in igb_update_itr, | |
5456 | * due to the difficulty of synchronizing statistics across multiple | |
5457 | * receive rings. The divisors and thresholds used by this function | |
5458 | * were determined based on theoretical maximum wire speed and testing | |
5459 | * data, in order to minimize response time while increasing bulk | |
5460 | * throughput. | |
406d4965 | 5461 | * This functionality is controlled by ethtool's coalescing settings. |
b980ac18 JK |
5462 | * NOTE: This function is called only when operating in a multiqueue |
5463 | * receive environment. | |
6eb5a7f1 | 5464 | **/ |
047e0030 | 5465 | static void igb_update_ring_itr(struct igb_q_vector *q_vector) |
9d5c8243 | 5466 | { |
047e0030 | 5467 | int new_val = q_vector->itr_val; |
6eb5a7f1 | 5468 | int avg_wire_size = 0; |
047e0030 | 5469 | struct igb_adapter *adapter = q_vector->adapter; |
12dcd86b | 5470 | unsigned int packets; |
9d5c8243 | 5471 | |
6eb5a7f1 AD |
5472 | /* For non-gigabit speeds, just fix the interrupt rate at 4000 |
5473 | * ints/sec - ITR timer value of 120 ticks. | |
5474 | */ | |
5475 | if (adapter->link_speed != SPEED_1000) { | |
0ba82994 | 5476 | new_val = IGB_4K_ITR; |
6eb5a7f1 | 5477 | goto set_itr_val; |
9d5c8243 | 5478 | } |
047e0030 | 5479 | |
0ba82994 AD |
5480 | packets = q_vector->rx.total_packets; |
5481 | if (packets) | |
5482 | avg_wire_size = q_vector->rx.total_bytes / packets; | |
047e0030 | 5483 | |
0ba82994 AD |
5484 | packets = q_vector->tx.total_packets; |
5485 | if (packets) | |
5486 | avg_wire_size = max_t(u32, avg_wire_size, | |
5487 | q_vector->tx.total_bytes / packets); | |
047e0030 AD |
5488 | |
5489 | /* if avg_wire_size isn't set no work was done */ | |
5490 | if (!avg_wire_size) | |
5491 | goto clear_counts; | |
9d5c8243 | 5492 | |
6eb5a7f1 AD |
5493 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ |
5494 | avg_wire_size += 24; | |
5495 | ||
5496 | /* Don't starve jumbo frames */ | |
5497 | avg_wire_size = min(avg_wire_size, 3000); | |
9d5c8243 | 5498 | |
6eb5a7f1 AD |
5499 | /* Give a little boost to mid-size frames */ |
5500 | if ((avg_wire_size > 300) && (avg_wire_size < 1200)) | |
5501 | new_val = avg_wire_size / 3; | |
5502 | else | |
5503 | new_val = avg_wire_size / 2; | |
9d5c8243 | 5504 | |
0ba82994 AD |
5505 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
5506 | if (new_val < IGB_20K_ITR && | |
5507 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || | |
5508 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) | |
5509 | new_val = IGB_20K_ITR; | |
abe1c363 | 5510 | |
6eb5a7f1 | 5511 | set_itr_val: |
047e0030 AD |
5512 | if (new_val != q_vector->itr_val) { |
5513 | q_vector->itr_val = new_val; | |
5514 | q_vector->set_itr = 1; | |
9d5c8243 | 5515 | } |
6eb5a7f1 | 5516 | clear_counts: |
0ba82994 AD |
5517 | q_vector->rx.total_bytes = 0; |
5518 | q_vector->rx.total_packets = 0; | |
5519 | q_vector->tx.total_bytes = 0; | |
5520 | q_vector->tx.total_packets = 0; | |
9d5c8243 AK |
5521 | } |
5522 | ||
5523 | /** | |
b980ac18 JK |
5524 | * igb_update_itr - update the dynamic ITR value based on statistics |
5525 | * @q_vector: pointer to q_vector | |
5526 | * @ring_container: ring info to update the itr for | |
5527 | * | |
5528 | * Stores a new ITR value based on packets and byte | |
5529 | * counts during the last interrupt. The advantage of per interrupt | |
5530 | * computation is faster updates and more accurate ITR for the current | |
5531 | * traffic pattern. Constants in this function were computed | |
5532 | * based on theoretical maximum wire speed and thresholds were set based | |
5533 | * on testing data as well as attempting to minimize response time | |
5534 | * while increasing bulk throughput. | |
406d4965 | 5535 | * This functionality is controlled by ethtool's coalescing settings. |
b980ac18 JK |
5536 | * NOTE: These calculations are only valid when operating in a single- |
5537 | * queue environment. | |
9d5c8243 | 5538 | **/ |
0ba82994 AD |
5539 | static void igb_update_itr(struct igb_q_vector *q_vector, |
5540 | struct igb_ring_container *ring_container) | |
9d5c8243 | 5541 | { |
0ba82994 AD |
5542 | unsigned int packets = ring_container->total_packets; |
5543 | unsigned int bytes = ring_container->total_bytes; | |
5544 | u8 itrval = ring_container->itr; | |
9d5c8243 | 5545 | |
0ba82994 | 5546 | /* no packets, exit with status unchanged */ |
9d5c8243 | 5547 | if (packets == 0) |
0ba82994 | 5548 | return; |
9d5c8243 | 5549 | |
0ba82994 | 5550 | switch (itrval) { |
9d5c8243 AK |
5551 | case lowest_latency: |
5552 | /* handle TSO and jumbo frames */ | |
5553 | if (bytes/packets > 8000) | |
0ba82994 | 5554 | itrval = bulk_latency; |
9d5c8243 | 5555 | else if ((packets < 5) && (bytes > 512)) |
0ba82994 | 5556 | itrval = low_latency; |
9d5c8243 AK |
5557 | break; |
5558 | case low_latency: /* 50 usec aka 20000 ints/s */ | |
5559 | if (bytes > 10000) { | |
5560 | /* this if handles the TSO accounting */ | |
d34a15ab | 5561 | if (bytes/packets > 8000) |
0ba82994 | 5562 | itrval = bulk_latency; |
d34a15ab | 5563 | else if ((packets < 10) || ((bytes/packets) > 1200)) |
0ba82994 | 5564 | itrval = bulk_latency; |
d34a15ab | 5565 | else if ((packets > 35)) |
0ba82994 | 5566 | itrval = lowest_latency; |
9d5c8243 | 5567 | } else if (bytes/packets > 2000) { |
0ba82994 | 5568 | itrval = bulk_latency; |
9d5c8243 | 5569 | } else if (packets <= 2 && bytes < 512) { |
0ba82994 | 5570 | itrval = lowest_latency; |
9d5c8243 AK |
5571 | } |
5572 | break; | |
5573 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | |
5574 | if (bytes > 25000) { | |
5575 | if (packets > 35) | |
0ba82994 | 5576 | itrval = low_latency; |
1e5c3d21 | 5577 | } else if (bytes < 1500) { |
0ba82994 | 5578 | itrval = low_latency; |
9d5c8243 AK |
5579 | } |
5580 | break; | |
5581 | } | |
5582 | ||
0ba82994 AD |
5583 | /* clear work counters since we have the values we need */ |
5584 | ring_container->total_bytes = 0; | |
5585 | ring_container->total_packets = 0; | |
5586 | ||
5587 | /* write updated itr to ring container */ | |
5588 | ring_container->itr = itrval; | |
9d5c8243 AK |
5589 | } |
5590 | ||
0ba82994 | 5591 | static void igb_set_itr(struct igb_q_vector *q_vector) |
9d5c8243 | 5592 | { |
0ba82994 | 5593 | struct igb_adapter *adapter = q_vector->adapter; |
047e0030 | 5594 | u32 new_itr = q_vector->itr_val; |
0ba82994 | 5595 | u8 current_itr = 0; |
9d5c8243 AK |
5596 | |
5597 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | |
5598 | if (adapter->link_speed != SPEED_1000) { | |
5599 | current_itr = 0; | |
0ba82994 | 5600 | new_itr = IGB_4K_ITR; |
9d5c8243 AK |
5601 | goto set_itr_now; |
5602 | } | |
5603 | ||
0ba82994 AD |
5604 | igb_update_itr(q_vector, &q_vector->tx); |
5605 | igb_update_itr(q_vector, &q_vector->rx); | |
9d5c8243 | 5606 | |
0ba82994 | 5607 | current_itr = max(q_vector->rx.itr, q_vector->tx.itr); |
9d5c8243 | 5608 | |
6eb5a7f1 | 5609 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
0ba82994 AD |
5610 | if (current_itr == lowest_latency && |
5611 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || | |
5612 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) | |
6eb5a7f1 AD |
5613 | current_itr = low_latency; |
5614 | ||
9d5c8243 AK |
5615 | switch (current_itr) { |
5616 | /* counts and packets in update_itr are dependent on these numbers */ | |
5617 | case lowest_latency: | |
0ba82994 | 5618 | new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ |
9d5c8243 AK |
5619 | break; |
5620 | case low_latency: | |
0ba82994 | 5621 | new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ |
9d5c8243 AK |
5622 | break; |
5623 | case bulk_latency: | |
0ba82994 | 5624 | new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ |
9d5c8243 AK |
5625 | break; |
5626 | default: | |
5627 | break; | |
5628 | } | |
5629 | ||
5630 | set_itr_now: | |
047e0030 | 5631 | if (new_itr != q_vector->itr_val) { |
9d5c8243 AK |
5632 | /* this attempts to bias the interrupt rate towards Bulk |
5633 | * by adding intermediate steps when interrupt rate is | |
b980ac18 JK |
5634 | * increasing |
5635 | */ | |
047e0030 | 5636 | new_itr = new_itr > q_vector->itr_val ? |
b980ac18 JK |
5637 | max((new_itr * q_vector->itr_val) / |
5638 | (new_itr + (q_vector->itr_val >> 2)), | |
5639 | new_itr) : new_itr; | |
9d5c8243 AK |
5640 | /* Don't write the value here; it resets the adapter's |
5641 | * internal timer, and causes us to delay far longer than | |
5642 | * we should between interrupts. Instead, we write the ITR | |
5643 | * value at the beginning of the next interrupt so the timing | |
5644 | * ends up being correct. | |
5645 | */ | |
047e0030 AD |
5646 | q_vector->itr_val = new_itr; |
5647 | q_vector->set_itr = 1; | |
9d5c8243 | 5648 | } |
9d5c8243 AK |
5649 | } |
5650 | ||
3048cf84 JSP |
5651 | static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, |
5652 | struct igb_tx_buffer *first, | |
5653 | u32 vlan_macip_lens, u32 type_tucmd, | |
5654 | u32 mss_l4len_idx) | |
7d13a7d0 AD |
5655 | { |
5656 | struct e1000_adv_tx_context_desc *context_desc; | |
5657 | u16 i = tx_ring->next_to_use; | |
3048cf84 | 5658 | struct timespec64 ts; |
7d13a7d0 AD |
5659 | |
5660 | context_desc = IGB_TX_CTXTDESC(tx_ring, i); | |
5661 | ||
5662 | i++; | |
5663 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | |
5664 | ||
5665 | /* set bits to identify this as an advanced context descriptor */ | |
5666 | type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; | |
5667 | ||
5668 | /* For 82575, context index must be unique per ring. */ | |
866cff06 | 5669 | if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) |
7d13a7d0 AD |
5670 | mss_l4len_idx |= tx_ring->reg_idx << 4; |
5671 | ||
5672 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | |
7d13a7d0 AD |
5673 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); |
5674 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | |
3048cf84 JSP |
5675 | |
5676 | /* We assume there is always a valid tx time available. Invalid times | |
5677 | * should have been handled by the upper layers. | |
5678 | */ | |
5679 | if (tx_ring->launchtime_enable) { | |
6acab13b JK |
5680 | ts = ktime_to_timespec64(first->skb->tstamp); |
5681 | first->skb->tstamp = ktime_set(0, 0); | |
3048cf84 JSP |
5682 | context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); |
5683 | } else { | |
5684 | context_desc->seqnum_seed = 0; | |
5685 | } | |
7d13a7d0 AD |
5686 | } |
5687 | ||
7af40ad9 AD |
5688 | static int igb_tso(struct igb_ring *tx_ring, |
5689 | struct igb_tx_buffer *first, | |
5690 | u8 *hdr_len) | |
9d5c8243 | 5691 | { |
e10715d3 | 5692 | u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; |
7af40ad9 | 5693 | struct sk_buff *skb = first->skb; |
e10715d3 AD |
5694 | union { |
5695 | struct iphdr *v4; | |
5696 | struct ipv6hdr *v6; | |
5697 | unsigned char *hdr; | |
5698 | } ip; | |
5699 | union { | |
5700 | struct tcphdr *tcp; | |
4085d06d | 5701 | struct udphdr *udp; |
e10715d3 AD |
5702 | unsigned char *hdr; |
5703 | } l4; | |
5704 | u32 paylen, l4_offset; | |
06c14e5a | 5705 | int err; |
7d13a7d0 | 5706 | |
ed6aa105 AD |
5707 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
5708 | return 0; | |
5709 | ||
7d13a7d0 AD |
5710 | if (!skb_is_gso(skb)) |
5711 | return 0; | |
9d5c8243 | 5712 | |
06c14e5a FR |
5713 | err = skb_cow_head(skb, 0); |
5714 | if (err < 0) | |
5715 | return err; | |
9d5c8243 | 5716 | |
e10715d3 AD |
5717 | ip.hdr = skb_network_header(skb); |
5718 | l4.hdr = skb_checksum_start(skb); | |
5719 | ||
7d13a7d0 | 5720 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
4085d06d JH |
5721 | type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? |
5722 | E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; | |
9d5c8243 | 5723 | |
e10715d3 AD |
5724 | /* initialize outer IP header fields */ |
5725 | if (ip.v4->version == 4) { | |
516165a1 AD |
5726 | unsigned char *csum_start = skb_checksum_start(skb); |
5727 | unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); | |
5728 | ||
e10715d3 AD |
5729 | /* IP header will have to cancel out any data that |
5730 | * is not a part of the outer IP header | |
5731 | */ | |
516165a1 AD |
5732 | ip.v4->check = csum_fold(csum_partial(trans_start, |
5733 | csum_start - trans_start, | |
5734 | 0)); | |
7d13a7d0 | 5735 | type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; |
e10715d3 AD |
5736 | |
5737 | ip.v4->tot_len = 0; | |
7af40ad9 AD |
5738 | first->tx_flags |= IGB_TX_FLAGS_TSO | |
5739 | IGB_TX_FLAGS_CSUM | | |
5740 | IGB_TX_FLAGS_IPV4; | |
e10715d3 AD |
5741 | } else { |
5742 | ip.v6->payload_len = 0; | |
7af40ad9 AD |
5743 | first->tx_flags |= IGB_TX_FLAGS_TSO | |
5744 | IGB_TX_FLAGS_CSUM; | |
9d5c8243 AK |
5745 | } |
5746 | ||
e10715d3 AD |
5747 | /* determine offset of inner transport header */ |
5748 | l4_offset = l4.hdr - skb->data; | |
5749 | ||
e10715d3 AD |
5750 | /* remove payload length from inner checksum */ |
5751 | paylen = skb->len - l4_offset; | |
4085d06d JH |
5752 | if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { |
5753 | /* compute length of segmentation header */ | |
5754 | *hdr_len = (l4.tcp->doff * 4) + l4_offset; | |
5755 | csum_replace_by_diff(&l4.tcp->check, | |
5756 | (__force __wsum)htonl(paylen)); | |
5757 | } else { | |
5758 | /* compute length of segmentation header */ | |
5759 | *hdr_len = sizeof(*l4.udp) + l4_offset; | |
5760 | csum_replace_by_diff(&l4.udp->check, | |
5761 | (__force __wsum)htonl(paylen)); | |
5762 | } | |
9d5c8243 | 5763 | |
7af40ad9 AD |
5764 | /* update gso size and bytecount with header size */ |
5765 | first->gso_segs = skb_shinfo(skb)->gso_segs; | |
5766 | first->bytecount += (first->gso_segs - 1) * *hdr_len; | |
5767 | ||
9d5c8243 | 5768 | /* MSS L4LEN IDX */ |
e10715d3 | 5769 | mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; |
7d13a7d0 | 5770 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; |
9d5c8243 | 5771 | |
7d13a7d0 | 5772 | /* VLAN MACLEN IPLEN */ |
e10715d3 AD |
5773 | vlan_macip_lens = l4.hdr - ip.hdr; |
5774 | vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; | |
7af40ad9 | 5775 | vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; |
9d5c8243 | 5776 | |
3048cf84 JSP |
5777 | igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, |
5778 | type_tucmd, mss_l4len_idx); | |
9d5c8243 | 5779 | |
7d13a7d0 | 5780 | return 1; |
9d5c8243 AK |
5781 | } |
5782 | ||
6e033700 AD |
5783 | static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb) |
5784 | { | |
5785 | unsigned int offset = 0; | |
5786 | ||
5787 | ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); | |
5788 | ||
5789 | return offset == skb_checksum_start_offset(skb); | |
5790 | } | |
5791 | ||
7af40ad9 | 5792 | static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) |
9d5c8243 | 5793 | { |
7af40ad9 | 5794 | struct sk_buff *skb = first->skb; |
7d13a7d0 | 5795 | u32 vlan_macip_lens = 0; |
7d13a7d0 | 5796 | u32 type_tucmd = 0; |
9d5c8243 | 5797 | |
7d13a7d0 | 5798 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
6e033700 | 5799 | csum_failed: |
a798fbac JSP |
5800 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && |
5801 | !tx_ring->launchtime_enable) | |
7af40ad9 | 5802 | return; |
6e033700 AD |
5803 | goto no_csum; |
5804 | } | |
fa4a7ef3 | 5805 | |
6e033700 AD |
5806 | switch (skb->csum_offset) { |
5807 | case offsetof(struct tcphdr, check): | |
5808 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; | |
5809 | /* fall through */ | |
5810 | case offsetof(struct udphdr, check): | |
5811 | break; | |
5812 | case offsetof(struct sctphdr, checksum): | |
5813 | /* validate that this is actually an SCTP request */ | |
5814 | if (((first->protocol == htons(ETH_P_IP)) && | |
5815 | (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || | |
5816 | ((first->protocol == htons(ETH_P_IPV6)) && | |
5817 | igb_ipv6_csum_is_sctp(skb))) { | |
5818 | type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; | |
7d13a7d0 | 5819 | break; |
9d5c8243 | 5820 | } |
7e9660ff | 5821 | /* fall through */ |
6e033700 AD |
5822 | default: |
5823 | skb_checksum_help(skb); | |
5824 | goto csum_failed; | |
7d13a7d0 | 5825 | } |
9d5c8243 | 5826 | |
6e033700 AD |
5827 | /* update TX checksum flag */ |
5828 | first->tx_flags |= IGB_TX_FLAGS_CSUM; | |
5829 | vlan_macip_lens = skb_checksum_start_offset(skb) - | |
5830 | skb_network_offset(skb); | |
5831 | no_csum: | |
7d13a7d0 | 5832 | vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; |
7af40ad9 | 5833 | vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; |
9d5c8243 | 5834 | |
3048cf84 | 5835 | igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); |
9d5c8243 AK |
5836 | } |
5837 | ||
1d9daf45 AD |
5838 | #define IGB_SET_FLAG(_input, _flag, _result) \ |
5839 | ((_flag <= _result) ? \ | |
5840 | ((u32)(_input & _flag) * (_result / _flag)) : \ | |
5841 | ((u32)(_input & _flag) / (_flag / _result))) | |
5842 | ||
5843 | static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) | |
e032afc8 AD |
5844 | { |
5845 | /* set type for advanced descriptor with frame checksum insertion */ | |
1d9daf45 AD |
5846 | u32 cmd_type = E1000_ADVTXD_DTYP_DATA | |
5847 | E1000_ADVTXD_DCMD_DEXT | | |
5848 | E1000_ADVTXD_DCMD_IFCS; | |
e032afc8 AD |
5849 | |
5850 | /* set HW vlan bit if vlan is present */ | |
1d9daf45 AD |
5851 | cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, |
5852 | (E1000_ADVTXD_DCMD_VLE)); | |
5853 | ||
5854 | /* set segmentation bits for TSO */ | |
5855 | cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, | |
5856 | (E1000_ADVTXD_DCMD_TSE)); | |
e032afc8 AD |
5857 | |
5858 | /* set timestamp bit if present */ | |
1d9daf45 AD |
5859 | cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, |
5860 | (E1000_ADVTXD_MAC_TSTAMP)); | |
e032afc8 | 5861 | |
1d9daf45 AD |
5862 | /* insert frame checksum */ |
5863 | cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); | |
e032afc8 AD |
5864 | |
5865 | return cmd_type; | |
5866 | } | |
5867 | ||
7af40ad9 AD |
5868 | static void igb_tx_olinfo_status(struct igb_ring *tx_ring, |
5869 | union e1000_adv_tx_desc *tx_desc, | |
5870 | u32 tx_flags, unsigned int paylen) | |
e032afc8 AD |
5871 | { |
5872 | u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; | |
5873 | ||
1d9daf45 AD |
5874 | /* 82575 requires a unique index per ring */ |
5875 | if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) | |
e032afc8 AD |
5876 | olinfo_status |= tx_ring->reg_idx << 4; |
5877 | ||
5878 | /* insert L4 checksum */ | |
1d9daf45 AD |
5879 | olinfo_status |= IGB_SET_FLAG(tx_flags, |
5880 | IGB_TX_FLAGS_CSUM, | |
5881 | (E1000_TXD_POPTS_TXSM << 8)); | |
e032afc8 | 5882 | |
1d9daf45 AD |
5883 | /* insert IPv4 checksum */ |
5884 | olinfo_status |= IGB_SET_FLAG(tx_flags, | |
5885 | IGB_TX_FLAGS_IPV4, | |
5886 | (E1000_TXD_POPTS_IXSM << 8)); | |
e032afc8 | 5887 | |
7af40ad9 | 5888 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
e032afc8 AD |
5889 | } |
5890 | ||
6f19e12f DM |
5891 | static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) |
5892 | { | |
5893 | struct net_device *netdev = tx_ring->netdev; | |
5894 | ||
5895 | netif_stop_subqueue(netdev, tx_ring->queue_index); | |
5896 | ||
5897 | /* Herbert's original patch had: | |
5898 | * smp_mb__after_netif_stop_queue(); | |
5899 | * but since that doesn't exist yet, just open code it. | |
5900 | */ | |
5901 | smp_mb(); | |
5902 | ||
5903 | /* We need to check again in a case another CPU has just | |
5904 | * made room available. | |
5905 | */ | |
5906 | if (igb_desc_unused(tx_ring) < size) | |
5907 | return -EBUSY; | |
5908 | ||
5909 | /* A reprieve! */ | |
5910 | netif_wake_subqueue(netdev, tx_ring->queue_index); | |
5911 | ||
5912 | u64_stats_update_begin(&tx_ring->tx_syncp2); | |
5913 | tx_ring->tx_stats.restart_queue2++; | |
5914 | u64_stats_update_end(&tx_ring->tx_syncp2); | |
5915 | ||
5916 | return 0; | |
5917 | } | |
5918 | ||
5919 | static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) | |
5920 | { | |
5921 | if (igb_desc_unused(tx_ring) >= size) | |
5922 | return 0; | |
5923 | return __igb_maybe_stop_tx(tx_ring, size); | |
5924 | } | |
5925 | ||
74344e32 JK |
5926 | static int igb_tx_map(struct igb_ring *tx_ring, |
5927 | struct igb_tx_buffer *first, | |
5928 | const u8 hdr_len) | |
9d5c8243 | 5929 | { |
7af40ad9 | 5930 | struct sk_buff *skb = first->skb; |
c9f14bf3 | 5931 | struct igb_tx_buffer *tx_buffer; |
ebe42d16 | 5932 | union e1000_adv_tx_desc *tx_desc; |
d7840976 | 5933 | skb_frag_t *frag; |
ebe42d16 | 5934 | dma_addr_t dma; |
80d0759e | 5935 | unsigned int data_len, size; |
7af40ad9 | 5936 | u32 tx_flags = first->tx_flags; |
1d9daf45 | 5937 | u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); |
ebe42d16 | 5938 | u16 i = tx_ring->next_to_use; |
ebe42d16 AD |
5939 | |
5940 | tx_desc = IGB_TX_DESC(tx_ring, i); | |
5941 | ||
80d0759e AD |
5942 | igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); |
5943 | ||
5944 | size = skb_headlen(skb); | |
5945 | data_len = skb->data_len; | |
ebe42d16 AD |
5946 | |
5947 | dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); | |
9d5c8243 | 5948 | |
80d0759e AD |
5949 | tx_buffer = first; |
5950 | ||
5951 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { | |
5952 | if (dma_mapping_error(tx_ring->dev, dma)) | |
5953 | goto dma_error; | |
5954 | ||
5955 | /* record length, and DMA address */ | |
5956 | dma_unmap_len_set(tx_buffer, len, size); | |
5957 | dma_unmap_addr_set(tx_buffer, dma, dma); | |
5958 | ||
5959 | tx_desc->read.buffer_addr = cpu_to_le64(dma); | |
ebe42d16 | 5960 | |
ebe42d16 AD |
5961 | while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { |
5962 | tx_desc->read.cmd_type_len = | |
1d9daf45 | 5963 | cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); |
ebe42d16 AD |
5964 | |
5965 | i++; | |
5966 | tx_desc++; | |
5967 | if (i == tx_ring->count) { | |
5968 | tx_desc = IGB_TX_DESC(tx_ring, 0); | |
5969 | i = 0; | |
5970 | } | |
80d0759e | 5971 | tx_desc->read.olinfo_status = 0; |
ebe42d16 AD |
5972 | |
5973 | dma += IGB_MAX_DATA_PER_TXD; | |
5974 | size -= IGB_MAX_DATA_PER_TXD; | |
5975 | ||
ebe42d16 AD |
5976 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
5977 | } | |
5978 | ||
5979 | if (likely(!data_len)) | |
5980 | break; | |
2bbfebe2 | 5981 | |
1d9daf45 | 5982 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); |
9d5c8243 | 5983 | |
65689fef | 5984 | i++; |
ebe42d16 AD |
5985 | tx_desc++; |
5986 | if (i == tx_ring->count) { | |
5987 | tx_desc = IGB_TX_DESC(tx_ring, 0); | |
65689fef | 5988 | i = 0; |
ebe42d16 | 5989 | } |
80d0759e | 5990 | tx_desc->read.olinfo_status = 0; |
65689fef | 5991 | |
9e903e08 | 5992 | size = skb_frag_size(frag); |
ebe42d16 AD |
5993 | data_len -= size; |
5994 | ||
5995 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, | |
80d0759e | 5996 | size, DMA_TO_DEVICE); |
6366ad33 | 5997 | |
c9f14bf3 | 5998 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
9d5c8243 AK |
5999 | } |
6000 | ||
ebe42d16 | 6001 | /* write last descriptor with RS and EOP bits */ |
1d9daf45 AD |
6002 | cmd_type |= size | IGB_TXD_DCMD; |
6003 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); | |
8542db05 | 6004 | |
80d0759e AD |
6005 | netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); |
6006 | ||
8542db05 AD |
6007 | /* set the timestamp */ |
6008 | first->time_stamp = jiffies; | |
6009 | ||
a9e51058 JK |
6010 | skb_tx_timestamp(skb); |
6011 | ||
b980ac18 | 6012 | /* Force memory writes to complete before letting h/w know there |
ebe42d16 AD |
6013 | * are new descriptors to fetch. (Only applicable for weak-ordered |
6014 | * memory model archs, such as IA-64). | |
6015 | * | |
6016 | * We also need this memory barrier to make certain all of the | |
6017 | * status bits have been updated before next_to_watch is written. | |
6018 | */ | |
73017f4e | 6019 | dma_wmb(); |
ebe42d16 | 6020 | |
8542db05 | 6021 | /* set next_to_watch value indicating a packet is present */ |
ebe42d16 | 6022 | first->next_to_watch = tx_desc; |
9d5c8243 | 6023 | |
ebe42d16 AD |
6024 | i++; |
6025 | if (i == tx_ring->count) | |
6026 | i = 0; | |
6366ad33 | 6027 | |
ebe42d16 | 6028 | tx_ring->next_to_use = i; |
6366ad33 | 6029 | |
6f19e12f DM |
6030 | /* Make sure there is space in the ring for the next send. */ |
6031 | igb_maybe_stop_tx(tx_ring, DESC_NEEDED); | |
6032 | ||
6b16f9ee | 6033 | if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { |
0b725a2c | 6034 | writel(i, tx_ring->tail); |
0b725a2c | 6035 | } |
74344e32 | 6036 | return 0; |
ebe42d16 AD |
6037 | |
6038 | dma_error: | |
6039 | dev_err(tx_ring->dev, "TX DMA map failed\n"); | |
7cc6fd4c | 6040 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
ebe42d16 AD |
6041 | |
6042 | /* clear dma mappings for failed tx_buffer_info map */ | |
7cc6fd4c AD |
6043 | while (tx_buffer != first) { |
6044 | if (dma_unmap_len(tx_buffer, len)) | |
6045 | dma_unmap_page(tx_ring->dev, | |
6046 | dma_unmap_addr(tx_buffer, dma), | |
6047 | dma_unmap_len(tx_buffer, len), | |
6048 | DMA_TO_DEVICE); | |
6049 | dma_unmap_len_set(tx_buffer, len, 0); | |
6050 | ||
104ba833 | 6051 | if (i-- == 0) |
7cc6fd4c | 6052 | i += tx_ring->count; |
c9f14bf3 | 6053 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
6366ad33 AD |
6054 | } |
6055 | ||
7cc6fd4c AD |
6056 | if (dma_unmap_len(tx_buffer, len)) |
6057 | dma_unmap_single(tx_ring->dev, | |
6058 | dma_unmap_addr(tx_buffer, dma), | |
6059 | dma_unmap_len(tx_buffer, len), | |
6060 | DMA_TO_DEVICE); | |
6061 | dma_unmap_len_set(tx_buffer, len, 0); | |
6062 | ||
6063 | dev_kfree_skb_any(tx_buffer->skb); | |
6064 | tx_buffer->skb = NULL; | |
6065 | ||
9d5c8243 | 6066 | tx_ring->next_to_use = i; |
74344e32 JK |
6067 | |
6068 | return -1; | |
9d5c8243 AK |
6069 | } |
6070 | ||
cd392f5c AD |
6071 | netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, |
6072 | struct igb_ring *tx_ring) | |
9d5c8243 | 6073 | { |
8542db05 | 6074 | struct igb_tx_buffer *first; |
ebe42d16 | 6075 | int tso; |
91d4ee33 | 6076 | u32 tx_flags = 0; |
2ee52ad4 | 6077 | unsigned short f; |
21ba6fe1 | 6078 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); |
31f6adbb | 6079 | __be16 protocol = vlan_get_protocol(skb); |
91d4ee33 | 6080 | u8 hdr_len = 0; |
9d5c8243 | 6081 | |
21ba6fe1 AD |
6082 | /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, |
6083 | * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, | |
9d5c8243 | 6084 | * + 2 desc gap to keep tail from touching head, |
9d5c8243 | 6085 | * + 1 desc for context descriptor, |
21ba6fe1 AD |
6086 | * otherwise try next time |
6087 | */ | |
2ee52ad4 | 6088 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
d7840976 MWO |
6089 | count += TXD_USE_COUNT(skb_frag_size( |
6090 | &skb_shinfo(skb)->frags[f])); | |
21ba6fe1 AD |
6091 | |
6092 | if (igb_maybe_stop_tx(tx_ring, count + 3)) { | |
9d5c8243 | 6093 | /* this is a hard error */ |
9d5c8243 AK |
6094 | return NETDEV_TX_BUSY; |
6095 | } | |
33af6bcc | 6096 | |
7af40ad9 AD |
6097 | /* record the location of the first descriptor for this packet */ |
6098 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; | |
6099 | first->skb = skb; | |
6100 | first->bytecount = skb->len; | |
6101 | first->gso_segs = 1; | |
6102 | ||
b646c22e AD |
6103 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { |
6104 | struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); | |
1f6e8178 | 6105 | |
0a6f2f05 | 6106 | if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && |
26bd4e2d | 6107 | !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, |
ed4420a3 | 6108 | &adapter->state)) { |
b646c22e AD |
6109 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
6110 | tx_flags |= IGB_TX_FLAGS_TSTAMP; | |
6111 | ||
6112 | adapter->ptp_tx_skb = skb_get(skb); | |
6113 | adapter->ptp_tx_start = jiffies; | |
6114 | if (adapter->hw.mac.type == e1000_82576) | |
6115 | schedule_work(&adapter->ptp_tx_work); | |
c3b8f85e JK |
6116 | } else { |
6117 | adapter->tx_hwtstamp_skipped++; | |
b646c22e | 6118 | } |
33af6bcc | 6119 | } |
9d5c8243 | 6120 | |
df8a39de | 6121 | if (skb_vlan_tag_present(skb)) { |
9d5c8243 | 6122 | tx_flags |= IGB_TX_FLAGS_VLAN; |
df8a39de | 6123 | tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); |
9d5c8243 AK |
6124 | } |
6125 | ||
7af40ad9 AD |
6126 | /* record initial flags and protocol */ |
6127 | first->tx_flags = tx_flags; | |
6128 | first->protocol = protocol; | |
cdfd01fc | 6129 | |
7af40ad9 AD |
6130 | tso = igb_tso(tx_ring, first, &hdr_len); |
6131 | if (tso < 0) | |
7d13a7d0 | 6132 | goto out_drop; |
7af40ad9 AD |
6133 | else if (!tso) |
6134 | igb_tx_csum(tx_ring, first); | |
9d5c8243 | 6135 | |
74344e32 JK |
6136 | if (igb_tx_map(tx_ring, first, hdr_len)) |
6137 | goto cleanup_tx_tstamp; | |
85ad76b2 | 6138 | |
9d5c8243 | 6139 | return NETDEV_TX_OK; |
7d13a7d0 AD |
6140 | |
6141 | out_drop: | |
7cc6fd4c AD |
6142 | dev_kfree_skb_any(first->skb); |
6143 | first->skb = NULL; | |
74344e32 JK |
6144 | cleanup_tx_tstamp: |
6145 | if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) { | |
6146 | struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); | |
6147 | ||
6148 | dev_kfree_skb_any(adapter->ptp_tx_skb); | |
6149 | adapter->ptp_tx_skb = NULL; | |
6150 | if (adapter->hw.mac.type == e1000_82576) | |
6151 | cancel_work_sync(&adapter->ptp_tx_work); | |
6152 | clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); | |
6153 | } | |
7af40ad9 | 6154 | |
7d13a7d0 | 6155 | return NETDEV_TX_OK; |
9d5c8243 AK |
6156 | } |
6157 | ||
0b725a2c DM |
6158 | static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, |
6159 | struct sk_buff *skb) | |
1cc3bd87 | 6160 | { |
0b725a2c DM |
6161 | unsigned int r_idx = skb->queue_mapping; |
6162 | ||
1cc3bd87 AD |
6163 | if (r_idx >= adapter->num_tx_queues) |
6164 | r_idx = r_idx % adapter->num_tx_queues; | |
6165 | ||
6166 | return adapter->tx_ring[r_idx]; | |
6167 | } | |
6168 | ||
cd392f5c AD |
6169 | static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, |
6170 | struct net_device *netdev) | |
9d5c8243 AK |
6171 | { |
6172 | struct igb_adapter *adapter = netdev_priv(netdev); | |
b1a436c3 | 6173 | |
b980ac18 | 6174 | /* The minimum packet size with TCTL.PSP set is 17 so pad the skb |
1cc3bd87 AD |
6175 | * in order to meet this minimum size requirement. |
6176 | */ | |
a94d9e22 AD |
6177 | if (skb_put_padto(skb, 17)) |
6178 | return NETDEV_TX_OK; | |
9d5c8243 | 6179 | |
1cc3bd87 | 6180 | return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); |
9d5c8243 AK |
6181 | } |
6182 | ||
6183 | /** | |
b980ac18 JK |
6184 | * igb_tx_timeout - Respond to a Tx Hang |
6185 | * @netdev: network interface device structure | |
9d5c8243 AK |
6186 | **/ |
6187 | static void igb_tx_timeout(struct net_device *netdev) | |
6188 | { | |
6189 | struct igb_adapter *adapter = netdev_priv(netdev); | |
6190 | struct e1000_hw *hw = &adapter->hw; | |
6191 | ||
6192 | /* Do the reset outside of interrupt context */ | |
6193 | adapter->tx_timeout_count++; | |
f7ba205e | 6194 | |
06218a8d | 6195 | if (hw->mac.type >= e1000_82580) |
55cac248 AD |
6196 | hw->dev_spec._82575.global_device_reset = true; |
6197 | ||
9d5c8243 | 6198 | schedule_work(&adapter->reset_task); |
265de409 AD |
6199 | wr32(E1000_EICS, |
6200 | (adapter->eims_enable_mask & ~adapter->eims_other)); | |
9d5c8243 AK |
6201 | } |
6202 | ||
6203 | static void igb_reset_task(struct work_struct *work) | |
6204 | { | |
6205 | struct igb_adapter *adapter; | |
6206 | adapter = container_of(work, struct igb_adapter, reset_task); | |
6207 | ||
c97ec42a TI |
6208 | igb_dump(adapter); |
6209 | netdev_err(adapter->netdev, "Reset adapter\n"); | |
9d5c8243 AK |
6210 | igb_reinit_locked(adapter); |
6211 | } | |
6212 | ||
6213 | /** | |
b980ac18 JK |
6214 | * igb_get_stats64 - Get System Network Statistics |
6215 | * @netdev: network interface device structure | |
6216 | * @stats: rtnl_link_stats64 pointer | |
9d5c8243 | 6217 | **/ |
bc1f4470 | 6218 | static void igb_get_stats64(struct net_device *netdev, |
6219 | struct rtnl_link_stats64 *stats) | |
9d5c8243 | 6220 | { |
12dcd86b ED |
6221 | struct igb_adapter *adapter = netdev_priv(netdev); |
6222 | ||
5642e27b | 6223 | spin_lock(&adapter->stats64_lock); |
81e3f64a | 6224 | igb_update_stats(adapter); |
12dcd86b | 6225 | memcpy(stats, &adapter->stats64, sizeof(*stats)); |
5642e27b | 6226 | spin_unlock(&adapter->stats64_lock); |
9d5c8243 AK |
6227 | } |
6228 | ||
6229 | /** | |
b980ac18 JK |
6230 | * igb_change_mtu - Change the Maximum Transfer Unit |
6231 | * @netdev: network interface device structure | |
6232 | * @new_mtu: new value for maximum frame size | |
9d5c8243 | 6233 | * |
b980ac18 | 6234 | * Returns 0 on success, negative on failure |
9d5c8243 AK |
6235 | **/ |
6236 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) | |
6237 | { | |
6238 | struct igb_adapter *adapter = netdev_priv(netdev); | |
153285f9 | 6239 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
9d5c8243 | 6240 | |
2ccd994c AD |
6241 | /* adjust max frame to be at least the size of a standard frame */ |
6242 | if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) | |
6243 | max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; | |
6244 | ||
9d5c8243 | 6245 | while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) |
0d451e79 | 6246 | usleep_range(1000, 2000); |
73cd78f1 | 6247 | |
9d5c8243 AK |
6248 | /* igb_down has a dependency on max_frame_size */ |
6249 | adapter->max_frame_size = max_frame; | |
559e9c49 | 6250 | |
4c844851 AD |
6251 | if (netif_running(netdev)) |
6252 | igb_down(adapter); | |
9d5c8243 | 6253 | |
12299132 FF |
6254 | netdev_dbg(netdev, "changing MTU from %d to %d\n", |
6255 | netdev->mtu, new_mtu); | |
9d5c8243 AK |
6256 | netdev->mtu = new_mtu; |
6257 | ||
6258 | if (netif_running(netdev)) | |
6259 | igb_up(adapter); | |
6260 | else | |
6261 | igb_reset(adapter); | |
6262 | ||
6263 | clear_bit(__IGB_RESETTING, &adapter->state); | |
6264 | ||
6265 | return 0; | |
6266 | } | |
6267 | ||
6268 | /** | |
b980ac18 JK |
6269 | * igb_update_stats - Update the board statistics counters |
6270 | * @adapter: board private structure | |
9d5c8243 | 6271 | **/ |
81e3f64a | 6272 | void igb_update_stats(struct igb_adapter *adapter) |
9d5c8243 | 6273 | { |
81e3f64a | 6274 | struct rtnl_link_stats64 *net_stats = &adapter->stats64; |
9d5c8243 AK |
6275 | struct e1000_hw *hw = &adapter->hw; |
6276 | struct pci_dev *pdev = adapter->pdev; | |
fa3d9a6d | 6277 | u32 reg, mpc; |
3f9c0164 AD |
6278 | int i; |
6279 | u64 bytes, packets; | |
12dcd86b ED |
6280 | unsigned int start; |
6281 | u64 _bytes, _packets; | |
9d5c8243 | 6282 | |
b980ac18 | 6283 | /* Prevent stats update while adapter is being reset, or if the pci |
9d5c8243 AK |
6284 | * connection is down. |
6285 | */ | |
6286 | if (adapter->link_speed == 0) | |
6287 | return; | |
6288 | if (pci_channel_offline(pdev)) | |
6289 | return; | |
6290 | ||
3f9c0164 AD |
6291 | bytes = 0; |
6292 | packets = 0; | |
7f90128e AA |
6293 | |
6294 | rcu_read_lock(); | |
3f9c0164 | 6295 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3025a446 | 6296 | struct igb_ring *ring = adapter->rx_ring[i]; |
e66c083a TF |
6297 | u32 rqdpc = rd32(E1000_RQDPC(i)); |
6298 | if (hw->mac.type >= e1000_i210) | |
6299 | wr32(E1000_RQDPC(i), 0); | |
12dcd86b | 6300 | |
ae1c07a6 AD |
6301 | if (rqdpc) { |
6302 | ring->rx_stats.drops += rqdpc; | |
6303 | net_stats->rx_fifo_errors += rqdpc; | |
6304 | } | |
12dcd86b ED |
6305 | |
6306 | do { | |
57a7744e | 6307 | start = u64_stats_fetch_begin_irq(&ring->rx_syncp); |
12dcd86b ED |
6308 | _bytes = ring->rx_stats.bytes; |
6309 | _packets = ring->rx_stats.packets; | |
57a7744e | 6310 | } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); |
12dcd86b ED |
6311 | bytes += _bytes; |
6312 | packets += _packets; | |
3f9c0164 AD |
6313 | } |
6314 | ||
128e45eb AD |
6315 | net_stats->rx_bytes = bytes; |
6316 | net_stats->rx_packets = packets; | |
3f9c0164 AD |
6317 | |
6318 | bytes = 0; | |
6319 | packets = 0; | |
6320 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
3025a446 | 6321 | struct igb_ring *ring = adapter->tx_ring[i]; |
12dcd86b | 6322 | do { |
57a7744e | 6323 | start = u64_stats_fetch_begin_irq(&ring->tx_syncp); |
12dcd86b ED |
6324 | _bytes = ring->tx_stats.bytes; |
6325 | _packets = ring->tx_stats.packets; | |
57a7744e | 6326 | } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); |
12dcd86b ED |
6327 | bytes += _bytes; |
6328 | packets += _packets; | |
3f9c0164 | 6329 | } |
128e45eb AD |
6330 | net_stats->tx_bytes = bytes; |
6331 | net_stats->tx_packets = packets; | |
7f90128e | 6332 | rcu_read_unlock(); |
3f9c0164 AD |
6333 | |
6334 | /* read stats registers */ | |
9d5c8243 AK |
6335 | adapter->stats.crcerrs += rd32(E1000_CRCERRS); |
6336 | adapter->stats.gprc += rd32(E1000_GPRC); | |
6337 | adapter->stats.gorc += rd32(E1000_GORCL); | |
6338 | rd32(E1000_GORCH); /* clear GORCL */ | |
6339 | adapter->stats.bprc += rd32(E1000_BPRC); | |
6340 | adapter->stats.mprc += rd32(E1000_MPRC); | |
6341 | adapter->stats.roc += rd32(E1000_ROC); | |
6342 | ||
6343 | adapter->stats.prc64 += rd32(E1000_PRC64); | |
6344 | adapter->stats.prc127 += rd32(E1000_PRC127); | |
6345 | adapter->stats.prc255 += rd32(E1000_PRC255); | |
6346 | adapter->stats.prc511 += rd32(E1000_PRC511); | |
6347 | adapter->stats.prc1023 += rd32(E1000_PRC1023); | |
6348 | adapter->stats.prc1522 += rd32(E1000_PRC1522); | |
6349 | adapter->stats.symerrs += rd32(E1000_SYMERRS); | |
6350 | adapter->stats.sec += rd32(E1000_SEC); | |
6351 | ||
fa3d9a6d MW |
6352 | mpc = rd32(E1000_MPC); |
6353 | adapter->stats.mpc += mpc; | |
6354 | net_stats->rx_fifo_errors += mpc; | |
9d5c8243 AK |
6355 | adapter->stats.scc += rd32(E1000_SCC); |
6356 | adapter->stats.ecol += rd32(E1000_ECOL); | |
6357 | adapter->stats.mcc += rd32(E1000_MCC); | |
6358 | adapter->stats.latecol += rd32(E1000_LATECOL); | |
6359 | adapter->stats.dc += rd32(E1000_DC); | |
6360 | adapter->stats.rlec += rd32(E1000_RLEC); | |
6361 | adapter->stats.xonrxc += rd32(E1000_XONRXC); | |
6362 | adapter->stats.xontxc += rd32(E1000_XONTXC); | |
6363 | adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); | |
6364 | adapter->stats.xofftxc += rd32(E1000_XOFFTXC); | |
6365 | adapter->stats.fcruc += rd32(E1000_FCRUC); | |
6366 | adapter->stats.gptc += rd32(E1000_GPTC); | |
6367 | adapter->stats.gotc += rd32(E1000_GOTCL); | |
6368 | rd32(E1000_GOTCH); /* clear GOTCL */ | |
fa3d9a6d | 6369 | adapter->stats.rnbc += rd32(E1000_RNBC); |
9d5c8243 AK |
6370 | adapter->stats.ruc += rd32(E1000_RUC); |
6371 | adapter->stats.rfc += rd32(E1000_RFC); | |
6372 | adapter->stats.rjc += rd32(E1000_RJC); | |
6373 | adapter->stats.tor += rd32(E1000_TORH); | |
6374 | adapter->stats.tot += rd32(E1000_TOTH); | |
6375 | adapter->stats.tpr += rd32(E1000_TPR); | |
6376 | ||
6377 | adapter->stats.ptc64 += rd32(E1000_PTC64); | |
6378 | adapter->stats.ptc127 += rd32(E1000_PTC127); | |
6379 | adapter->stats.ptc255 += rd32(E1000_PTC255); | |
6380 | adapter->stats.ptc511 += rd32(E1000_PTC511); | |
6381 | adapter->stats.ptc1023 += rd32(E1000_PTC1023); | |
6382 | adapter->stats.ptc1522 += rd32(E1000_PTC1522); | |
6383 | ||
6384 | adapter->stats.mptc += rd32(E1000_MPTC); | |
6385 | adapter->stats.bptc += rd32(E1000_BPTC); | |
6386 | ||
2d0b0f69 NN |
6387 | adapter->stats.tpt += rd32(E1000_TPT); |
6388 | adapter->stats.colc += rd32(E1000_COLC); | |
9d5c8243 AK |
6389 | |
6390 | adapter->stats.algnerrc += rd32(E1000_ALGNERRC); | |
43915c7c NN |
6391 | /* read internal phy specific stats */ |
6392 | reg = rd32(E1000_CTRL_EXT); | |
6393 | if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { | |
6394 | adapter->stats.rxerrc += rd32(E1000_RXERRC); | |
3dbdf969 CW |
6395 | |
6396 | /* this stat has invalid values on i210/i211 */ | |
6397 | if ((hw->mac.type != e1000_i210) && | |
6398 | (hw->mac.type != e1000_i211)) | |
6399 | adapter->stats.tncrs += rd32(E1000_TNCRS); | |
43915c7c NN |
6400 | } |
6401 | ||
9d5c8243 AK |
6402 | adapter->stats.tsctc += rd32(E1000_TSCTC); |
6403 | adapter->stats.tsctfc += rd32(E1000_TSCTFC); | |
6404 | ||
6405 | adapter->stats.iac += rd32(E1000_IAC); | |
6406 | adapter->stats.icrxoc += rd32(E1000_ICRXOC); | |
6407 | adapter->stats.icrxptc += rd32(E1000_ICRXPTC); | |
6408 | adapter->stats.icrxatc += rd32(E1000_ICRXATC); | |
6409 | adapter->stats.ictxptc += rd32(E1000_ICTXPTC); | |
6410 | adapter->stats.ictxatc += rd32(E1000_ICTXATC); | |
6411 | adapter->stats.ictxqec += rd32(E1000_ICTXQEC); | |
6412 | adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); | |
6413 | adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); | |
6414 | ||
6415 | /* Fill out the OS statistics structure */ | |
128e45eb AD |
6416 | net_stats->multicast = adapter->stats.mprc; |
6417 | net_stats->collisions = adapter->stats.colc; | |
9d5c8243 AK |
6418 | |
6419 | /* Rx Errors */ | |
6420 | ||
6421 | /* RLEC on some newer hardware can be incorrect so build | |
b980ac18 JK |
6422 | * our own version based on RUC and ROC |
6423 | */ | |
128e45eb | 6424 | net_stats->rx_errors = adapter->stats.rxerrc + |
9d5c8243 AK |
6425 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
6426 | adapter->stats.ruc + adapter->stats.roc + | |
6427 | adapter->stats.cexterr; | |
128e45eb AD |
6428 | net_stats->rx_length_errors = adapter->stats.ruc + |
6429 | adapter->stats.roc; | |
6430 | net_stats->rx_crc_errors = adapter->stats.crcerrs; | |
6431 | net_stats->rx_frame_errors = adapter->stats.algnerrc; | |
6432 | net_stats->rx_missed_errors = adapter->stats.mpc; | |
9d5c8243 AK |
6433 | |
6434 | /* Tx Errors */ | |
128e45eb AD |
6435 | net_stats->tx_errors = adapter->stats.ecol + |
6436 | adapter->stats.latecol; | |
6437 | net_stats->tx_aborted_errors = adapter->stats.ecol; | |
6438 | net_stats->tx_window_errors = adapter->stats.latecol; | |
6439 | net_stats->tx_carrier_errors = adapter->stats.tncrs; | |
9d5c8243 AK |
6440 | |
6441 | /* Tx Dropped needs to be maintained elsewhere */ | |
6442 | ||
9d5c8243 AK |
6443 | /* Management Stats */ |
6444 | adapter->stats.mgptc += rd32(E1000_MGTPTC); | |
6445 | adapter->stats.mgprc += rd32(E1000_MGTPRC); | |
6446 | adapter->stats.mgpdc += rd32(E1000_MGTPDC); | |
0a915b95 CW |
6447 | |
6448 | /* OS2BMC Stats */ | |
6449 | reg = rd32(E1000_MANC); | |
6450 | if (reg & E1000_MANC_EN_BMC2OS) { | |
6451 | adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); | |
6452 | adapter->stats.o2bspc += rd32(E1000_O2BSPC); | |
6453 | adapter->stats.b2ospc += rd32(E1000_B2OSPC); | |
6454 | adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); | |
6455 | } | |
9d5c8243 AK |
6456 | } |
6457 | ||
61d7f75f RC |
6458 | static void igb_tsync_interrupt(struct igb_adapter *adapter) |
6459 | { | |
6460 | struct e1000_hw *hw = &adapter->hw; | |
00c65578 | 6461 | struct ptp_clock_event event; |
40c9b079 | 6462 | struct timespec64 ts; |
720db4ff | 6463 | u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); |
00c65578 RC |
6464 | |
6465 | if (tsicr & TSINTR_SYS_WRAP) { | |
6466 | event.type = PTP_CLOCK_PPS; | |
6467 | if (adapter->ptp_caps.pps) | |
6468 | ptp_clock_event(adapter->ptp_clock, &event); | |
00c65578 RC |
6469 | ack |= TSINTR_SYS_WRAP; |
6470 | } | |
61d7f75f RC |
6471 | |
6472 | if (tsicr & E1000_TSICR_TXTS) { | |
61d7f75f RC |
6473 | /* retrieve hardware timestamp */ |
6474 | schedule_work(&adapter->ptp_tx_work); | |
00c65578 | 6475 | ack |= E1000_TSICR_TXTS; |
61d7f75f | 6476 | } |
00c65578 | 6477 | |
720db4ff RC |
6478 | if (tsicr & TSINTR_TT0) { |
6479 | spin_lock(&adapter->tmreg_lock); | |
40c9b079 AB |
6480 | ts = timespec64_add(adapter->perout[0].start, |
6481 | adapter->perout[0].period); | |
6482 | /* u32 conversion of tv_sec is safe until y2106 */ | |
720db4ff | 6483 | wr32(E1000_TRGTTIML0, ts.tv_nsec); |
40c9b079 | 6484 | wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); |
720db4ff RC |
6485 | tsauxc = rd32(E1000_TSAUXC); |
6486 | tsauxc |= TSAUXC_EN_TT0; | |
6487 | wr32(E1000_TSAUXC, tsauxc); | |
6488 | adapter->perout[0].start = ts; | |
6489 | spin_unlock(&adapter->tmreg_lock); | |
6490 | ack |= TSINTR_TT0; | |
6491 | } | |
6492 | ||
6493 | if (tsicr & TSINTR_TT1) { | |
6494 | spin_lock(&adapter->tmreg_lock); | |
40c9b079 AB |
6495 | ts = timespec64_add(adapter->perout[1].start, |
6496 | adapter->perout[1].period); | |
720db4ff | 6497 | wr32(E1000_TRGTTIML1, ts.tv_nsec); |
40c9b079 | 6498 | wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); |
720db4ff RC |
6499 | tsauxc = rd32(E1000_TSAUXC); |
6500 | tsauxc |= TSAUXC_EN_TT1; | |
6501 | wr32(E1000_TSAUXC, tsauxc); | |
6502 | adapter->perout[1].start = ts; | |
6503 | spin_unlock(&adapter->tmreg_lock); | |
6504 | ack |= TSINTR_TT1; | |
6505 | } | |
6506 | ||
6507 | if (tsicr & TSINTR_AUTT0) { | |
6508 | nsec = rd32(E1000_AUXSTMPL0); | |
6509 | sec = rd32(E1000_AUXSTMPH0); | |
6510 | event.type = PTP_CLOCK_EXTTS; | |
6511 | event.index = 0; | |
6512 | event.timestamp = sec * 1000000000ULL + nsec; | |
6513 | ptp_clock_event(adapter->ptp_clock, &event); | |
6514 | ack |= TSINTR_AUTT0; | |
6515 | } | |
6516 | ||
6517 | if (tsicr & TSINTR_AUTT1) { | |
6518 | nsec = rd32(E1000_AUXSTMPL1); | |
6519 | sec = rd32(E1000_AUXSTMPH1); | |
6520 | event.type = PTP_CLOCK_EXTTS; | |
6521 | event.index = 1; | |
6522 | event.timestamp = sec * 1000000000ULL + nsec; | |
6523 | ptp_clock_event(adapter->ptp_clock, &event); | |
6524 | ack |= TSINTR_AUTT1; | |
6525 | } | |
6526 | ||
00c65578 RC |
6527 | /* acknowledge the interrupts */ |
6528 | wr32(E1000_TSICR, ack); | |
61d7f75f RC |
6529 | } |
6530 | ||
9d5c8243 AK |
6531 | static irqreturn_t igb_msix_other(int irq, void *data) |
6532 | { | |
047e0030 | 6533 | struct igb_adapter *adapter = data; |
9d5c8243 | 6534 | struct e1000_hw *hw = &adapter->hw; |
844290e5 | 6535 | u32 icr = rd32(E1000_ICR); |
844290e5 | 6536 | /* reading ICR causes bit 31 of EICR to be cleared */ |
dda0e083 | 6537 | |
7f081d40 AD |
6538 | if (icr & E1000_ICR_DRSTA) |
6539 | schedule_work(&adapter->reset_task); | |
6540 | ||
047e0030 | 6541 | if (icr & E1000_ICR_DOUTSYNC) { |
dda0e083 AD |
6542 | /* HW is reporting DMA is out of sync */ |
6543 | adapter->stats.doosync++; | |
13800469 GR |
6544 | /* The DMA Out of Sync is also indication of a spoof event |
6545 | * in IOV mode. Check the Wrong VM Behavior register to | |
b980ac18 JK |
6546 | * see if it is really a spoof event. |
6547 | */ | |
13800469 | 6548 | igb_check_wvbr(adapter); |
dda0e083 | 6549 | } |
eebbbdba | 6550 | |
4ae196df AD |
6551 | /* Check for a mailbox event */ |
6552 | if (icr & E1000_ICR_VMMB) | |
6553 | igb_msg_task(adapter); | |
6554 | ||
6555 | if (icr & E1000_ICR_LSC) { | |
6556 | hw->mac.get_link_status = 1; | |
6557 | /* guard against interrupt when we're going down */ | |
6558 | if (!test_bit(__IGB_DOWN, &adapter->state)) | |
6559 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
6560 | } | |
6561 | ||
61d7f75f RC |
6562 | if (icr & E1000_ICR_TS) |
6563 | igb_tsync_interrupt(adapter); | |
1f6e8178 | 6564 | |
844290e5 | 6565 | wr32(E1000_EIMS, adapter->eims_other); |
9d5c8243 AK |
6566 | |
6567 | return IRQ_HANDLED; | |
6568 | } | |
6569 | ||
047e0030 | 6570 | static void igb_write_itr(struct igb_q_vector *q_vector) |
9d5c8243 | 6571 | { |
26b39276 | 6572 | struct igb_adapter *adapter = q_vector->adapter; |
047e0030 | 6573 | u32 itr_val = q_vector->itr_val & 0x7FFC; |
9d5c8243 | 6574 | |
047e0030 AD |
6575 | if (!q_vector->set_itr) |
6576 | return; | |
73cd78f1 | 6577 | |
047e0030 AD |
6578 | if (!itr_val) |
6579 | itr_val = 0x4; | |
661086df | 6580 | |
26b39276 AD |
6581 | if (adapter->hw.mac.type == e1000_82575) |
6582 | itr_val |= itr_val << 16; | |
661086df | 6583 | else |
0ba82994 | 6584 | itr_val |= E1000_EITR_CNT_IGNR; |
661086df | 6585 | |
047e0030 AD |
6586 | writel(itr_val, q_vector->itr_register); |
6587 | q_vector->set_itr = 0; | |
6eb5a7f1 AD |
6588 | } |
6589 | ||
047e0030 | 6590 | static irqreturn_t igb_msix_ring(int irq, void *data) |
9d5c8243 | 6591 | { |
047e0030 | 6592 | struct igb_q_vector *q_vector = data; |
9d5c8243 | 6593 | |
047e0030 AD |
6594 | /* Write the ITR value calculated from the previous interrupt. */ |
6595 | igb_write_itr(q_vector); | |
9d5c8243 | 6596 | |
047e0030 | 6597 | napi_schedule(&q_vector->napi); |
844290e5 | 6598 | |
047e0030 | 6599 | return IRQ_HANDLED; |
fe4506b6 JC |
6600 | } |
6601 | ||
421e02f0 | 6602 | #ifdef CONFIG_IGB_DCA |
6a05004a AD |
6603 | static void igb_update_tx_dca(struct igb_adapter *adapter, |
6604 | struct igb_ring *tx_ring, | |
6605 | int cpu) | |
6606 | { | |
6607 | struct e1000_hw *hw = &adapter->hw; | |
6608 | u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); | |
6609 | ||
6610 | if (hw->mac.type != e1000_82575) | |
6611 | txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; | |
6612 | ||
b980ac18 | 6613 | /* We can enable relaxed ordering for reads, but not writes when |
6a05004a AD |
6614 | * DCA is enabled. This is due to a known issue in some chipsets |
6615 | * which will cause the DCA tag to be cleared. | |
6616 | */ | |
6617 | txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | | |
6618 | E1000_DCA_TXCTRL_DATA_RRO_EN | | |
6619 | E1000_DCA_TXCTRL_DESC_DCA_EN; | |
6620 | ||
6621 | wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); | |
6622 | } | |
6623 | ||
6624 | static void igb_update_rx_dca(struct igb_adapter *adapter, | |
6625 | struct igb_ring *rx_ring, | |
6626 | int cpu) | |
6627 | { | |
6628 | struct e1000_hw *hw = &adapter->hw; | |
6629 | u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); | |
6630 | ||
6631 | if (hw->mac.type != e1000_82575) | |
6632 | rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; | |
6633 | ||
b980ac18 | 6634 | /* We can enable relaxed ordering for reads, but not writes when |
6a05004a AD |
6635 | * DCA is enabled. This is due to a known issue in some chipsets |
6636 | * which will cause the DCA tag to be cleared. | |
6637 | */ | |
6638 | rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | | |
6639 | E1000_DCA_RXCTRL_DESC_DCA_EN; | |
6640 | ||
6641 | wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); | |
6642 | } | |
6643 | ||
047e0030 | 6644 | static void igb_update_dca(struct igb_q_vector *q_vector) |
fe4506b6 | 6645 | { |
047e0030 | 6646 | struct igb_adapter *adapter = q_vector->adapter; |
fe4506b6 | 6647 | int cpu = get_cpu(); |
fe4506b6 | 6648 | |
047e0030 AD |
6649 | if (q_vector->cpu == cpu) |
6650 | goto out_no_update; | |
6651 | ||
6a05004a AD |
6652 | if (q_vector->tx.ring) |
6653 | igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); | |
6654 | ||
6655 | if (q_vector->rx.ring) | |
6656 | igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); | |
6657 | ||
047e0030 AD |
6658 | q_vector->cpu = cpu; |
6659 | out_no_update: | |
fe4506b6 JC |
6660 | put_cpu(); |
6661 | } | |
6662 | ||
6663 | static void igb_setup_dca(struct igb_adapter *adapter) | |
6664 | { | |
7e0e99ef | 6665 | struct e1000_hw *hw = &adapter->hw; |
fe4506b6 JC |
6666 | int i; |
6667 | ||
7dfc16fa | 6668 | if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) |
fe4506b6 JC |
6669 | return; |
6670 | ||
7e0e99ef AD |
6671 | /* Always use CB2 mode, difference is masked in the CB driver. */ |
6672 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); | |
6673 | ||
047e0030 | 6674 | for (i = 0; i < adapter->num_q_vectors; i++) { |
26b39276 AD |
6675 | adapter->q_vector[i]->cpu = -1; |
6676 | igb_update_dca(adapter->q_vector[i]); | |
fe4506b6 JC |
6677 | } |
6678 | } | |
6679 | ||
6680 | static int __igb_notify_dca(struct device *dev, void *data) | |
6681 | { | |
6682 | struct net_device *netdev = dev_get_drvdata(dev); | |
6683 | struct igb_adapter *adapter = netdev_priv(netdev); | |
090b1795 | 6684 | struct pci_dev *pdev = adapter->pdev; |
fe4506b6 JC |
6685 | struct e1000_hw *hw = &adapter->hw; |
6686 | unsigned long event = *(unsigned long *)data; | |
6687 | ||
6688 | switch (event) { | |
6689 | case DCA_PROVIDER_ADD: | |
6690 | /* if already enabled, don't do it again */ | |
7dfc16fa | 6691 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
fe4506b6 | 6692 | break; |
fe4506b6 | 6693 | if (dca_add_requester(dev) == 0) { |
bbd98fe4 | 6694 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
090b1795 | 6695 | dev_info(&pdev->dev, "DCA enabled\n"); |
fe4506b6 JC |
6696 | igb_setup_dca(adapter); |
6697 | break; | |
6698 | } | |
b7b3ad7a | 6699 | /* Fall Through - since DCA is disabled. */ |
fe4506b6 | 6700 | case DCA_PROVIDER_REMOVE: |
7dfc16fa | 6701 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
fe4506b6 | 6702 | /* without this a class_device is left |
b980ac18 JK |
6703 | * hanging around in the sysfs model |
6704 | */ | |
fe4506b6 | 6705 | dca_remove_requester(dev); |
090b1795 | 6706 | dev_info(&pdev->dev, "DCA disabled\n"); |
7dfc16fa | 6707 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; |
cbd347ad | 6708 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); |
fe4506b6 JC |
6709 | } |
6710 | break; | |
6711 | } | |
bbd98fe4 | 6712 | |
fe4506b6 | 6713 | return 0; |
9d5c8243 AK |
6714 | } |
6715 | ||
fe4506b6 | 6716 | static int igb_notify_dca(struct notifier_block *nb, unsigned long event, |
b980ac18 | 6717 | void *p) |
fe4506b6 JC |
6718 | { |
6719 | int ret_val; | |
6720 | ||
6721 | ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, | |
b980ac18 | 6722 | __igb_notify_dca); |
fe4506b6 JC |
6723 | |
6724 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | |
6725 | } | |
421e02f0 | 6726 | #endif /* CONFIG_IGB_DCA */ |
9d5c8243 | 6727 | |
0224d663 GR |
6728 | #ifdef CONFIG_PCI_IOV |
6729 | static int igb_vf_configure(struct igb_adapter *adapter, int vf) | |
6730 | { | |
6731 | unsigned char mac_addr[ETH_ALEN]; | |
0224d663 | 6732 | |
5ac6f91d | 6733 | eth_zero_addr(mac_addr); |
0224d663 GR |
6734 | igb_set_vf_mac(adapter, vf, mac_addr); |
6735 | ||
70ea4783 LL |
6736 | /* By default spoof check is enabled for all VFs */ |
6737 | adapter->vf_data[vf].spoofchk_enabled = true; | |
6738 | ||
1b8b062a CV |
6739 | /* By default VFs are not trusted */ |
6740 | adapter->vf_data[vf].trusted = false; | |
6741 | ||
f557147c | 6742 | return 0; |
0224d663 GR |
6743 | } |
6744 | ||
0224d663 | 6745 | #endif |
4ae196df AD |
6746 | static void igb_ping_all_vfs(struct igb_adapter *adapter) |
6747 | { | |
6748 | struct e1000_hw *hw = &adapter->hw; | |
6749 | u32 ping; | |
6750 | int i; | |
6751 | ||
6752 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) { | |
6753 | ping = E1000_PF_CONTROL_MSG; | |
f2ca0dbe | 6754 | if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) |
4ae196df AD |
6755 | ping |= E1000_VT_MSGTYPE_CTS; |
6756 | igb_write_mbx(hw, &ping, 1, i); | |
6757 | } | |
6758 | } | |
6759 | ||
7d5753f0 AD |
6760 | static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) |
6761 | { | |
6762 | struct e1000_hw *hw = &adapter->hw; | |
6763 | u32 vmolr = rd32(E1000_VMOLR(vf)); | |
6764 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | |
6765 | ||
d85b9004 | 6766 | vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | |
b980ac18 | 6767 | IGB_VF_FLAG_MULTI_PROMISC); |
7d5753f0 AD |
6768 | vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); |
6769 | ||
6770 | if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { | |
6771 | vmolr |= E1000_VMOLR_MPME; | |
d85b9004 | 6772 | vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; |
7d5753f0 AD |
6773 | *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; |
6774 | } else { | |
b980ac18 | 6775 | /* if we have hashes and we are clearing a multicast promisc |
7d5753f0 AD |
6776 | * flag we need to write the hashes to the MTA as this step |
6777 | * was previously skipped | |
6778 | */ | |
6779 | if (vf_data->num_vf_mc_hashes > 30) { | |
6780 | vmolr |= E1000_VMOLR_MPME; | |
6781 | } else if (vf_data->num_vf_mc_hashes) { | |
6782 | int j; | |
9005df38 | 6783 | |
7d5753f0 AD |
6784 | vmolr |= E1000_VMOLR_ROMPE; |
6785 | for (j = 0; j < vf_data->num_vf_mc_hashes; j++) | |
6786 | igb_mta_set(hw, vf_data->vf_mc_hashes[j]); | |
6787 | } | |
6788 | } | |
6789 | ||
6790 | wr32(E1000_VMOLR(vf), vmolr); | |
6791 | ||
6792 | /* there are flags left unprocessed, likely not supported */ | |
6793 | if (*msgbuf & E1000_VT_MSGINFO_MASK) | |
6794 | return -EINVAL; | |
6795 | ||
6796 | return 0; | |
7d5753f0 AD |
6797 | } |
6798 | ||
4ae196df AD |
6799 | static int igb_set_vf_multicasts(struct igb_adapter *adapter, |
6800 | u32 *msgbuf, u32 vf) | |
6801 | { | |
6802 | int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; | |
6803 | u16 *hash_list = (u16 *)&msgbuf[1]; | |
6804 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | |
6805 | int i; | |
6806 | ||
7d5753f0 | 6807 | /* salt away the number of multicast addresses assigned |
4ae196df AD |
6808 | * to this VF for later use to restore when the PF multi cast |
6809 | * list changes | |
6810 | */ | |
6811 | vf_data->num_vf_mc_hashes = n; | |
6812 | ||
7d5753f0 AD |
6813 | /* only up to 30 hash values supported */ |
6814 | if (n > 30) | |
6815 | n = 30; | |
6816 | ||
6817 | /* store the hashes for later use */ | |
4ae196df | 6818 | for (i = 0; i < n; i++) |
a419aef8 | 6819 | vf_data->vf_mc_hashes[i] = hash_list[i]; |
4ae196df AD |
6820 | |
6821 | /* Flush and reset the mta with the new values */ | |
ff41f8dc | 6822 | igb_set_rx_mode(adapter->netdev); |
4ae196df AD |
6823 | |
6824 | return 0; | |
6825 | } | |
6826 | ||
6827 | static void igb_restore_vf_multicasts(struct igb_adapter *adapter) | |
6828 | { | |
6829 | struct e1000_hw *hw = &adapter->hw; | |
6830 | struct vf_data_storage *vf_data; | |
6831 | int i, j; | |
6832 | ||
6833 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | |
7d5753f0 | 6834 | u32 vmolr = rd32(E1000_VMOLR(i)); |
9005df38 | 6835 | |
7d5753f0 AD |
6836 | vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); |
6837 | ||
4ae196df | 6838 | vf_data = &adapter->vf_data[i]; |
7d5753f0 AD |
6839 | |
6840 | if ((vf_data->num_vf_mc_hashes > 30) || | |
6841 | (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { | |
6842 | vmolr |= E1000_VMOLR_MPME; | |
6843 | } else if (vf_data->num_vf_mc_hashes) { | |
6844 | vmolr |= E1000_VMOLR_ROMPE; | |
6845 | for (j = 0; j < vf_data->num_vf_mc_hashes; j++) | |
6846 | igb_mta_set(hw, vf_data->vf_mc_hashes[j]); | |
6847 | } | |
6848 | wr32(E1000_VMOLR(i), vmolr); | |
4ae196df AD |
6849 | } |
6850 | } | |
6851 | ||
6852 | static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) | |
6853 | { | |
6854 | struct e1000_hw *hw = &adapter->hw; | |
16903caa | 6855 | u32 pool_mask, vlvf_mask, i; |
4ae196df | 6856 | |
16903caa AD |
6857 | /* create mask for VF and other pools */ |
6858 | pool_mask = E1000_VLVF_POOLSEL_MASK; | |
a51d8c21 | 6859 | vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); |
16903caa AD |
6860 | |
6861 | /* drop PF from pool bits */ | |
a51d8c21 JK |
6862 | pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + |
6863 | adapter->vfs_allocated_count); | |
4ae196df AD |
6864 | |
6865 | /* Find the vlan filter for this id */ | |
16903caa AD |
6866 | for (i = E1000_VLVF_ARRAY_SIZE; i--;) { |
6867 | u32 vlvf = rd32(E1000_VLVF(i)); | |
6868 | u32 vfta_mask, vid, vfta; | |
4ae196df AD |
6869 | |
6870 | /* remove the vf from the pool */ | |
16903caa AD |
6871 | if (!(vlvf & vlvf_mask)) |
6872 | continue; | |
6873 | ||
6874 | /* clear out bit from VLVF */ | |
6875 | vlvf ^= vlvf_mask; | |
6876 | ||
6877 | /* if other pools are present, just remove ourselves */ | |
6878 | if (vlvf & pool_mask) | |
6879 | goto update_vlvfb; | |
4ae196df | 6880 | |
16903caa AD |
6881 | /* if PF is present, leave VFTA */ |
6882 | if (vlvf & E1000_VLVF_POOLSEL_MASK) | |
6883 | goto update_vlvf; | |
4ae196df | 6884 | |
16903caa | 6885 | vid = vlvf & E1000_VLVF_VLANID_MASK; |
a51d8c21 | 6886 | vfta_mask = BIT(vid % 32); |
16903caa AD |
6887 | |
6888 | /* clear bit from VFTA */ | |
6889 | vfta = adapter->shadow_vfta[vid / 32]; | |
6890 | if (vfta & vfta_mask) | |
6891 | hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); | |
6892 | update_vlvf: | |
6893 | /* clear pool selection enable */ | |
6894 | if (adapter->flags & IGB_FLAG_VLAN_PROMISC) | |
6895 | vlvf &= E1000_VLVF_POOLSEL_MASK; | |
6896 | else | |
6897 | vlvf = 0; | |
6898 | update_vlvfb: | |
6899 | /* clear pool bits */ | |
6900 | wr32(E1000_VLVF(i), vlvf); | |
4ae196df AD |
6901 | } |
6902 | } | |
ae641bdc | 6903 | |
16903caa | 6904 | static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) |
6f3dc319 | 6905 | { |
16903caa AD |
6906 | u32 vlvf; |
6907 | int idx; | |
6f3dc319 | 6908 | |
16903caa AD |
6909 | /* short cut the special case */ |
6910 | if (vlan == 0) | |
6911 | return 0; | |
6912 | ||
6913 | /* Search for the VLAN id in the VLVF entries */ | |
6914 | for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { | |
6915 | vlvf = rd32(E1000_VLVF(idx)); | |
6916 | if ((vlvf & VLAN_VID_MASK) == vlan) | |
6f3dc319 GR |
6917 | break; |
6918 | } | |
6919 | ||
16903caa | 6920 | return idx; |
4ae196df AD |
6921 | } |
6922 | ||
8008f68c | 6923 | static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) |
4ae196df AD |
6924 | { |
6925 | struct e1000_hw *hw = &adapter->hw; | |
16903caa AD |
6926 | u32 bits, pf_id; |
6927 | int idx; | |
51466239 | 6928 | |
16903caa AD |
6929 | idx = igb_find_vlvf_entry(hw, vid); |
6930 | if (!idx) | |
6931 | return; | |
4ae196df | 6932 | |
16903caa AD |
6933 | /* See if any other pools are set for this VLAN filter |
6934 | * entry other than the PF. | |
6935 | */ | |
6936 | pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; | |
a51d8c21 | 6937 | bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; |
16903caa AD |
6938 | bits &= rd32(E1000_VLVF(idx)); |
6939 | ||
6940 | /* Disable the filter so this falls into the default pool. */ | |
6941 | if (!bits) { | |
6942 | if (adapter->flags & IGB_FLAG_VLAN_PROMISC) | |
a51d8c21 | 6943 | wr32(E1000_VLVF(idx), BIT(pf_id)); |
16903caa AD |
6944 | else |
6945 | wr32(E1000_VLVF(idx), 0); | |
4ae196df | 6946 | } |
6f3dc319 | 6947 | } |
4ae196df | 6948 | |
a15d9259 AD |
6949 | static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, |
6950 | bool add, u32 vf) | |
4ae196df | 6951 | { |
a15d9259 | 6952 | int pf_id = adapter->vfs_allocated_count; |
6f3dc319 | 6953 | struct e1000_hw *hw = &adapter->hw; |
a15d9259 | 6954 | int err; |
ae641bdc | 6955 | |
a15d9259 AD |
6956 | /* If VLAN overlaps with one the PF is currently monitoring make |
6957 | * sure that we are able to allocate a VLVF entry. This may be | |
6958 | * redundant but it guarantees PF will maintain visibility to | |
6959 | * the VLAN. | |
6f3dc319 | 6960 | */ |
16903caa | 6961 | if (add && test_bit(vid, adapter->active_vlans)) { |
a15d9259 AD |
6962 | err = igb_vfta_set(hw, vid, pf_id, true, false); |
6963 | if (err) | |
6964 | return err; | |
4ae196df | 6965 | } |
6f3dc319 | 6966 | |
a15d9259 | 6967 | err = igb_vfta_set(hw, vid, vf, add, false); |
6f3dc319 | 6968 | |
16903caa AD |
6969 | if (add && !err) |
6970 | return err; | |
6f3dc319 | 6971 | |
16903caa AD |
6972 | /* If we failed to add the VF VLAN or we are removing the VF VLAN |
6973 | * we may need to drop the PF pool bit in order to allow us to free | |
6974 | * up the VLVF resources. | |
6f3dc319 | 6975 | */ |
16903caa AD |
6976 | if (test_bit(vid, adapter->active_vlans) || |
6977 | (adapter->flags & IGB_FLAG_VLAN_PROMISC)) | |
6978 | igb_update_pf_vlvf(adapter, vid); | |
6f3dc319 | 6979 | |
6f3dc319 | 6980 | return err; |
8151d294 WM |
6981 | } |
6982 | ||
6983 | static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) | |
6984 | { | |
6985 | struct e1000_hw *hw = &adapter->hw; | |
6986 | ||
6987 | if (vid) | |
6988 | wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); | |
6989 | else | |
6990 | wr32(E1000_VMVIR(vf), 0); | |
6991 | } | |
6992 | ||
a15d9259 AD |
6993 | static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, |
6994 | u16 vlan, u8 qos) | |
8151d294 | 6995 | { |
a15d9259 | 6996 | int err; |
8151d294 | 6997 | |
a15d9259 AD |
6998 | err = igb_set_vf_vlan(adapter, vlan, true, vf); |
6999 | if (err) | |
7000 | return err; | |
7001 | ||
7002 | igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); | |
7003 | igb_set_vmolr(adapter, vf, !vlan); | |
7004 | ||
7005 | /* revoke access to previous VLAN */ | |
7006 | if (vlan != adapter->vf_data[vf].pf_vlan) | |
7007 | igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, | |
7008 | false, vf); | |
7009 | ||
7010 | adapter->vf_data[vf].pf_vlan = vlan; | |
7011 | adapter->vf_data[vf].pf_qos = qos; | |
030f9f52 | 7012 | igb_set_vf_vlan_strip(adapter, vf, true); |
a15d9259 AD |
7013 | dev_info(&adapter->pdev->dev, |
7014 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); | |
7015 | if (test_bit(__IGB_DOWN, &adapter->state)) { | |
7016 | dev_warn(&adapter->pdev->dev, | |
7017 | "The VF VLAN has been set, but the PF device is not up.\n"); | |
7018 | dev_warn(&adapter->pdev->dev, | |
7019 | "Bring the PF device up before attempting to use the VF device.\n"); | |
b980ac18 | 7020 | } |
a15d9259 | 7021 | |
b980ac18 | 7022 | return err; |
4ae196df AD |
7023 | } |
7024 | ||
a15d9259 | 7025 | static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) |
6f3dc319 | 7026 | { |
a15d9259 AD |
7027 | /* Restore tagless access via VLAN 0 */ |
7028 | igb_set_vf_vlan(adapter, 0, true, vf); | |
6f3dc319 | 7029 | |
a15d9259 | 7030 | igb_set_vmvir(adapter, 0, vf); |
8151d294 | 7031 | igb_set_vmolr(adapter, vf, true); |
4ae196df | 7032 | |
a15d9259 AD |
7033 | /* Remove any PF assigned VLAN */ |
7034 | if (adapter->vf_data[vf].pf_vlan) | |
7035 | igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, | |
7036 | false, vf); | |
6f3dc319 | 7037 | |
a15d9259 AD |
7038 | adapter->vf_data[vf].pf_vlan = 0; |
7039 | adapter->vf_data[vf].pf_qos = 0; | |
030f9f52 | 7040 | igb_set_vf_vlan_strip(adapter, vf, false); |
6f3dc319 | 7041 | |
a15d9259 | 7042 | return 0; |
6f3dc319 GR |
7043 | } |
7044 | ||
79aab093 MS |
7045 | static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, |
7046 | u16 vlan, u8 qos, __be16 vlan_proto) | |
4ae196df | 7047 | { |
a15d9259 | 7048 | struct igb_adapter *adapter = netdev_priv(netdev); |
4ae196df | 7049 | |
a15d9259 AD |
7050 | if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) |
7051 | return -EINVAL; | |
6f3dc319 | 7052 | |
79aab093 MS |
7053 | if (vlan_proto != htons(ETH_P_8021Q)) |
7054 | return -EPROTONOSUPPORT; | |
7055 | ||
a15d9259 AD |
7056 | return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) : |
7057 | igb_disable_port_vlan(adapter, vf); | |
7058 | } | |
6f3dc319 | 7059 | |
a15d9259 AD |
7060 | static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) |
7061 | { | |
7062 | int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; | |
7063 | int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); | |
030f9f52 | 7064 | int ret; |
6f3dc319 | 7065 | |
a15d9259 AD |
7066 | if (adapter->vf_data[vf].pf_vlan) |
7067 | return -1; | |
6f3dc319 | 7068 | |
a15d9259 AD |
7069 | /* VLAN 0 is a special case, don't allow it to be removed */ |
7070 | if (!vid && !add) | |
7071 | return 0; | |
7072 | ||
030f9f52 CV |
7073 | ret = igb_set_vf_vlan(adapter, vid, !!add, vf); |
7074 | if (!ret) | |
7075 | igb_set_vf_vlan_strip(adapter, vf, !!vid); | |
7076 | return ret; | |
4ae196df AD |
7077 | } |
7078 | ||
f2ca0dbe | 7079 | static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) |
4ae196df | 7080 | { |
a15d9259 | 7081 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; |
4ae196df | 7082 | |
a15d9259 AD |
7083 | /* clear flags - except flag that indicates PF has set the MAC */ |
7084 | vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; | |
7085 | vf_data->last_nack = jiffies; | |
4ae196df AD |
7086 | |
7087 | /* reset vlans for device */ | |
7088 | igb_clear_vf_vfta(adapter, vf); | |
a15d9259 AD |
7089 | igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); |
7090 | igb_set_vmvir(adapter, vf_data->pf_vlan | | |
7091 | (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); | |
7092 | igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); | |
030f9f52 | 7093 | igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); |
4ae196df AD |
7094 | |
7095 | /* reset multicast table array for vf */ | |
7096 | adapter->vf_data[vf].num_vf_mc_hashes = 0; | |
7097 | ||
7098 | /* Flush and reset the mta with the new values */ | |
ff41f8dc | 7099 | igb_set_rx_mode(adapter->netdev); |
4ae196df AD |
7100 | } |
7101 | ||
f2ca0dbe AD |
7102 | static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) |
7103 | { | |
7104 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; | |
7105 | ||
5ac6f91d | 7106 | /* clear mac address as we were hotplug removed/added */ |
8151d294 | 7107 | if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) |
5ac6f91d | 7108 | eth_zero_addr(vf_mac); |
f2ca0dbe AD |
7109 | |
7110 | /* process remaining reset events */ | |
7111 | igb_vf_reset(adapter, vf); | |
7112 | } | |
7113 | ||
7114 | static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | |
4ae196df AD |
7115 | { |
7116 | struct e1000_hw *hw = &adapter->hw; | |
7117 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; | |
7118 | u32 reg, msgbuf[3]; | |
7119 | u8 *addr = (u8 *)(&msgbuf[1]); | |
7120 | ||
7121 | /* process all the same items cleared in a function level reset */ | |
f2ca0dbe | 7122 | igb_vf_reset(adapter, vf); |
4ae196df AD |
7123 | |
7124 | /* set vf mac address */ | |
83c21335 | 7125 | igb_set_vf_mac(adapter, vf, vf_mac); |
4ae196df AD |
7126 | |
7127 | /* enable transmit and receive for vf */ | |
7128 | reg = rd32(E1000_VFTE); | |
a51d8c21 | 7129 | wr32(E1000_VFTE, reg | BIT(vf)); |
4ae196df | 7130 | reg = rd32(E1000_VFRE); |
a51d8c21 | 7131 | wr32(E1000_VFRE, reg | BIT(vf)); |
4ae196df | 7132 | |
8fa7e0f7 | 7133 | adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; |
4ae196df AD |
7134 | |
7135 | /* reply to reset with ack and vf mac address */ | |
6ddbc4cf AG |
7136 | if (!is_zero_ether_addr(vf_mac)) { |
7137 | msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; | |
7138 | memcpy(addr, vf_mac, ETH_ALEN); | |
7139 | } else { | |
7140 | msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; | |
7141 | } | |
4ae196df AD |
7142 | igb_write_mbx(hw, msgbuf, 3, vf); |
7143 | } | |
7144 | ||
83c21335 YK |
7145 | static void igb_flush_mac_table(struct igb_adapter *adapter) |
7146 | { | |
7147 | struct e1000_hw *hw = &adapter->hw; | |
7148 | int i; | |
7149 | ||
7150 | for (i = 0; i < hw->mac.rar_entry_count; i++) { | |
7151 | adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; | |
7152 | memset(adapter->mac_table[i].addr, 0, ETH_ALEN); | |
7153 | adapter->mac_table[i].queue = 0; | |
7154 | igb_rar_set_index(adapter, i); | |
7155 | } | |
7156 | } | |
7157 | ||
7158 | static int igb_available_rars(struct igb_adapter *adapter, u8 queue) | |
7159 | { | |
7160 | struct e1000_hw *hw = &adapter->hw; | |
7161 | /* do not count rar entries reserved for VFs MAC addresses */ | |
7162 | int rar_entries = hw->mac.rar_entry_count - | |
7163 | adapter->vfs_allocated_count; | |
7164 | int i, count = 0; | |
7165 | ||
7166 | for (i = 0; i < rar_entries; i++) { | |
7167 | /* do not count default entries */ | |
7168 | if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) | |
7169 | continue; | |
7170 | ||
7171 | /* do not count "in use" entries for different queues */ | |
7172 | if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && | |
7173 | (adapter->mac_table[i].queue != queue)) | |
7174 | continue; | |
7175 | ||
7176 | count++; | |
7177 | } | |
7178 | ||
7179 | return count; | |
7180 | } | |
7181 | ||
7182 | /* Set default MAC address for the PF in the first RAR entry */ | |
7183 | static void igb_set_default_mac_filter(struct igb_adapter *adapter) | |
7184 | { | |
7185 | struct igb_mac_addr *mac_table = &adapter->mac_table[0]; | |
7186 | ||
7187 | ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); | |
7188 | mac_table->queue = adapter->vfs_allocated_count; | |
7189 | mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; | |
7190 | ||
7191 | igb_rar_set_index(adapter, 0); | |
7192 | } | |
7193 | ||
872f923c VCG |
7194 | /* If the filter to be added and an already existing filter express |
7195 | * the same address and address type, it should be possible to only | |
7196 | * override the other configurations, for example the queue to steer | |
7197 | * traffic. | |
7198 | */ | |
7199 | static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, | |
7200 | const u8 *addr, const u8 flags) | |
7201 | { | |
7202 | if (!(entry->state & IGB_MAC_STATE_IN_USE)) | |
7203 | return true; | |
7204 | ||
7205 | if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != | |
7206 | (flags & IGB_MAC_STATE_SRC_ADDR)) | |
7207 | return false; | |
7208 | ||
7209 | if (!ether_addr_equal(addr, entry->addr)) | |
7210 | return false; | |
7211 | ||
7212 | return true; | |
7213 | } | |
7214 | ||
1d717cf4 VCG |
7215 | /* Add a MAC filter for 'addr' directing matching traffic to 'queue', |
7216 | * 'flags' is used to indicate what kind of match is made, match is by | |
7217 | * default for the destination address, if matching by source address | |
7218 | * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. | |
7219 | */ | |
7220 | static int igb_add_mac_filter_flags(struct igb_adapter *adapter, | |
7221 | const u8 *addr, const u8 queue, | |
7222 | const u8 flags) | |
83c21335 YK |
7223 | { |
7224 | struct e1000_hw *hw = &adapter->hw; | |
7225 | int rar_entries = hw->mac.rar_entry_count - | |
7226 | adapter->vfs_allocated_count; | |
7227 | int i; | |
7228 | ||
7229 | if (is_zero_ether_addr(addr)) | |
7230 | return -EINVAL; | |
7231 | ||
7232 | /* Search for the first empty entry in the MAC table. | |
7233 | * Do not touch entries at the end of the table reserved for the VF MAC | |
7234 | * addresses. | |
7235 | */ | |
7236 | for (i = 0; i < rar_entries; i++) { | |
872f923c VCG |
7237 | if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], |
7238 | addr, flags)) | |
83c21335 YK |
7239 | continue; |
7240 | ||
7241 | ether_addr_copy(adapter->mac_table[i].addr, addr); | |
7242 | adapter->mac_table[i].queue = queue; | |
1d717cf4 | 7243 | adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; |
83c21335 YK |
7244 | |
7245 | igb_rar_set_index(adapter, i); | |
7246 | return i; | |
7247 | } | |
7248 | ||
7249 | return -ENOSPC; | |
7250 | } | |
7251 | ||
1d717cf4 | 7252 | static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, |
b476deab | 7253 | const u8 queue) |
1d717cf4 VCG |
7254 | { |
7255 | return igb_add_mac_filter_flags(adapter, addr, queue, 0); | |
7256 | } | |
7257 | ||
7258 | /* Remove a MAC filter for 'addr' directing matching traffic to | |
7259 | * 'queue', 'flags' is used to indicate what kind of match need to be | |
7260 | * removed, match is by default for the destination address, if | |
7261 | * matching by source address is to be removed the flag | |
7262 | * IGB_MAC_STATE_SRC_ADDR can be used. | |
7263 | */ | |
7264 | static int igb_del_mac_filter_flags(struct igb_adapter *adapter, | |
7265 | const u8 *addr, const u8 queue, | |
7266 | const u8 flags) | |
83c21335 YK |
7267 | { |
7268 | struct e1000_hw *hw = &adapter->hw; | |
7269 | int rar_entries = hw->mac.rar_entry_count - | |
7270 | adapter->vfs_allocated_count; | |
7271 | int i; | |
7272 | ||
7273 | if (is_zero_ether_addr(addr)) | |
7274 | return -EINVAL; | |
7275 | ||
7276 | /* Search for matching entry in the MAC table based on given address | |
7277 | * and queue. Do not touch entries at the end of the table reserved | |
7278 | * for the VF MAC addresses. | |
7279 | */ | |
7280 | for (i = 0; i < rar_entries; i++) { | |
7281 | if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) | |
7282 | continue; | |
1d717cf4 VCG |
7283 | if ((adapter->mac_table[i].state & flags) != flags) |
7284 | continue; | |
83c21335 YK |
7285 | if (adapter->mac_table[i].queue != queue) |
7286 | continue; | |
7287 | if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) | |
7288 | continue; | |
7289 | ||
872f923c VCG |
7290 | /* When a filter for the default address is "deleted", |
7291 | * we return it to its initial configuration | |
7292 | */ | |
7293 | if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { | |
7294 | adapter->mac_table[i].state = | |
7295 | IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; | |
7296 | adapter->mac_table[i].queue = | |
7297 | adapter->vfs_allocated_count; | |
7298 | } else { | |
7299 | adapter->mac_table[i].state = 0; | |
7300 | adapter->mac_table[i].queue = 0; | |
7301 | memset(adapter->mac_table[i].addr, 0, ETH_ALEN); | |
7302 | } | |
83c21335 YK |
7303 | |
7304 | igb_rar_set_index(adapter, i); | |
7305 | return 0; | |
7306 | } | |
7307 | ||
7308 | return -ENOENT; | |
7309 | } | |
7310 | ||
1d717cf4 VCG |
7311 | static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, |
7312 | const u8 queue) | |
7313 | { | |
7314 | return igb_del_mac_filter_flags(adapter, addr, queue, 0); | |
7315 | } | |
7316 | ||
0a823899 VCG |
7317 | int igb_add_mac_steering_filter(struct igb_adapter *adapter, |
7318 | const u8 *addr, u8 queue, u8 flags) | |
7319 | { | |
7320 | struct e1000_hw *hw = &adapter->hw; | |
7321 | ||
7322 | /* In theory, this should be supported on 82575 as well, but | |
7323 | * that part wasn't easily accessible during development. | |
7324 | */ | |
7325 | if (hw->mac.type != e1000_i210) | |
7326 | return -EOPNOTSUPP; | |
7327 | ||
7328 | return igb_add_mac_filter_flags(adapter, addr, queue, | |
7329 | IGB_MAC_STATE_QUEUE_STEERING | flags); | |
7330 | } | |
7331 | ||
7332 | int igb_del_mac_steering_filter(struct igb_adapter *adapter, | |
7333 | const u8 *addr, u8 queue, u8 flags) | |
7334 | { | |
7335 | return igb_del_mac_filter_flags(adapter, addr, queue, | |
7336 | IGB_MAC_STATE_QUEUE_STEERING | flags); | |
7337 | } | |
7338 | ||
83c21335 YK |
7339 | static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) |
7340 | { | |
7341 | struct igb_adapter *adapter = netdev_priv(netdev); | |
7342 | int ret; | |
7343 | ||
7344 | ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); | |
7345 | ||
7346 | return min_t(int, ret, 0); | |
7347 | } | |
7348 | ||
7349 | static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) | |
7350 | { | |
7351 | struct igb_adapter *adapter = netdev_priv(netdev); | |
7352 | ||
7353 | igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); | |
7354 | ||
7355 | return 0; | |
7356 | } | |
7357 | ||
b476deab CIK |
7358 | static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, |
7359 | const u32 info, const u8 *addr) | |
4827cc37 YK |
7360 | { |
7361 | struct pci_dev *pdev = adapter->pdev; | |
7362 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | |
7363 | struct list_head *pos; | |
7364 | struct vf_mac_filter *entry = NULL; | |
7365 | int ret = 0; | |
7366 | ||
7367 | switch (info) { | |
7368 | case E1000_VF_MAC_FILTER_CLR: | |
7369 | /* remove all unicast MAC filters related to the current VF */ | |
7370 | list_for_each(pos, &adapter->vf_macs.l) { | |
7371 | entry = list_entry(pos, struct vf_mac_filter, l); | |
7372 | if (entry->vf == vf) { | |
7373 | entry->vf = -1; | |
7374 | entry->free = true; | |
7375 | igb_del_mac_filter(adapter, entry->vf_mac, vf); | |
7376 | } | |
7377 | } | |
7378 | break; | |
7379 | case E1000_VF_MAC_FILTER_ADD: | |
1b8b062a CV |
7380 | if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && |
7381 | !vf_data->trusted) { | |
4827cc37 YK |
7382 | dev_warn(&pdev->dev, |
7383 | "VF %d requested MAC filter but is administratively denied\n", | |
7384 | vf); | |
7385 | return -EINVAL; | |
7386 | } | |
4827cc37 YK |
7387 | if (!is_valid_ether_addr(addr)) { |
7388 | dev_warn(&pdev->dev, | |
7389 | "VF %d attempted to set invalid MAC filter\n", | |
7390 | vf); | |
7391 | return -EINVAL; | |
7392 | } | |
7393 | ||
7394 | /* try to find empty slot in the list */ | |
7395 | list_for_each(pos, &adapter->vf_macs.l) { | |
7396 | entry = list_entry(pos, struct vf_mac_filter, l); | |
7397 | if (entry->free) | |
7398 | break; | |
7399 | } | |
7400 | ||
7401 | if (entry && entry->free) { | |
7402 | entry->free = false; | |
7403 | entry->vf = vf; | |
7404 | ether_addr_copy(entry->vf_mac, addr); | |
7405 | ||
7406 | ret = igb_add_mac_filter(adapter, addr, vf); | |
7407 | ret = min_t(int, ret, 0); | |
7408 | } else { | |
7409 | ret = -ENOSPC; | |
7410 | } | |
7411 | ||
7412 | if (ret == -ENOSPC) | |
7413 | dev_warn(&pdev->dev, | |
7414 | "VF %d has requested MAC filter but there is no space for it\n", | |
7415 | vf); | |
7416 | break; | |
7417 | default: | |
7418 | ret = -EINVAL; | |
7419 | break; | |
7420 | } | |
7421 | ||
7422 | return ret; | |
7423 | } | |
7424 | ||
4ae196df AD |
7425 | static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) |
7426 | { | |
4827cc37 YK |
7427 | struct pci_dev *pdev = adapter->pdev; |
7428 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | |
7429 | u32 info = msg[0] & E1000_VT_MSGINFO_MASK; | |
7430 | ||
b980ac18 | 7431 | /* The VF MAC Address is stored in a packed array of bytes |
de42edde GR |
7432 | * starting at the second 32 bit word of the msg array |
7433 | */ | |
4827cc37 YK |
7434 | unsigned char *addr = (unsigned char *)&msg[1]; |
7435 | int ret = 0; | |
4ae196df | 7436 | |
4827cc37 | 7437 | if (!info) { |
1b8b062a CV |
7438 | if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && |
7439 | !vf_data->trusted) { | |
4827cc37 YK |
7440 | dev_warn(&pdev->dev, |
7441 | "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", | |
7442 | vf); | |
7443 | return -EINVAL; | |
7444 | } | |
4ae196df | 7445 | |
4827cc37 YK |
7446 | if (!is_valid_ether_addr(addr)) { |
7447 | dev_warn(&pdev->dev, | |
7448 | "VF %d attempted to set invalid MAC\n", | |
7449 | vf); | |
7450 | return -EINVAL; | |
7451 | } | |
7452 | ||
7453 | ret = igb_set_vf_mac(adapter, vf, addr); | |
7454 | } else { | |
7455 | ret = igb_set_vf_mac_filter(adapter, vf, info, addr); | |
7456 | } | |
7457 | ||
7458 | return ret; | |
4ae196df AD |
7459 | } |
7460 | ||
7461 | static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) | |
7462 | { | |
7463 | struct e1000_hw *hw = &adapter->hw; | |
f2ca0dbe | 7464 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; |
4ae196df AD |
7465 | u32 msg = E1000_VT_MSGTYPE_NACK; |
7466 | ||
7467 | /* if device isn't clear to send it shouldn't be reading either */ | |
f2ca0dbe AD |
7468 | if (!(vf_data->flags & IGB_VF_FLAG_CTS) && |
7469 | time_after(jiffies, vf_data->last_nack + (2 * HZ))) { | |
4ae196df | 7470 | igb_write_mbx(hw, &msg, 1, vf); |
f2ca0dbe | 7471 | vf_data->last_nack = jiffies; |
4ae196df AD |
7472 | } |
7473 | } | |
7474 | ||
f2ca0dbe | 7475 | static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) |
4ae196df | 7476 | { |
f2ca0dbe AD |
7477 | struct pci_dev *pdev = adapter->pdev; |
7478 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; | |
4ae196df | 7479 | struct e1000_hw *hw = &adapter->hw; |
f2ca0dbe | 7480 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; |
4ae196df AD |
7481 | s32 retval; |
7482 | ||
46b3bb9b | 7483 | retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); |
4ae196df | 7484 | |
fef45f4c AD |
7485 | if (retval) { |
7486 | /* if receive failed revoke VF CTS stats and restart init */ | |
f2ca0dbe | 7487 | dev_err(&pdev->dev, "Error receiving message from VF\n"); |
fef45f4c AD |
7488 | vf_data->flags &= ~IGB_VF_FLAG_CTS; |
7489 | if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) | |
46b3bb9b | 7490 | goto unlock; |
fef45f4c AD |
7491 | goto out; |
7492 | } | |
4ae196df AD |
7493 | |
7494 | /* this is a message we already processed, do nothing */ | |
7495 | if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) | |
46b3bb9b | 7496 | goto unlock; |
4ae196df | 7497 | |
b980ac18 | 7498 | /* until the vf completes a reset it should not be |
4ae196df AD |
7499 | * allowed to start any configuration. |
7500 | */ | |
4ae196df | 7501 | if (msgbuf[0] == E1000_VF_RESET) { |
46b3bb9b | 7502 | /* unlocks mailbox */ |
4ae196df | 7503 | igb_vf_reset_msg(adapter, vf); |
f2ca0dbe | 7504 | return; |
4ae196df AD |
7505 | } |
7506 | ||
f2ca0dbe | 7507 | if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { |
fef45f4c | 7508 | if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) |
46b3bb9b | 7509 | goto unlock; |
fef45f4c AD |
7510 | retval = -1; |
7511 | goto out; | |
4ae196df AD |
7512 | } |
7513 | ||
7514 | switch ((msgbuf[0] & 0xFFFF)) { | |
7515 | case E1000_VF_SET_MAC_ADDR: | |
4827cc37 | 7516 | retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); |
4ae196df | 7517 | break; |
7d5753f0 AD |
7518 | case E1000_VF_SET_PROMISC: |
7519 | retval = igb_set_vf_promisc(adapter, msgbuf, vf); | |
7520 | break; | |
4ae196df AD |
7521 | case E1000_VF_SET_MULTICAST: |
7522 | retval = igb_set_vf_multicasts(adapter, msgbuf, vf); | |
7523 | break; | |
7524 | case E1000_VF_SET_LPE: | |
7525 | retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); | |
7526 | break; | |
7527 | case E1000_VF_SET_VLAN: | |
a6b5ea35 GR |
7528 | retval = -1; |
7529 | if (vf_data->pf_vlan) | |
7530 | dev_warn(&pdev->dev, | |
b980ac18 JK |
7531 | "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", |
7532 | vf); | |
8151d294 | 7533 | else |
a15d9259 | 7534 | retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); |
4ae196df AD |
7535 | break; |
7536 | default: | |
090b1795 | 7537 | dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); |
4ae196df AD |
7538 | retval = -1; |
7539 | break; | |
7540 | } | |
7541 | ||
fef45f4c AD |
7542 | msgbuf[0] |= E1000_VT_MSGTYPE_CTS; |
7543 | out: | |
4ae196df AD |
7544 | /* notify the VF of the results of what it sent us */ |
7545 | if (retval) | |
7546 | msgbuf[0] |= E1000_VT_MSGTYPE_NACK; | |
7547 | else | |
7548 | msgbuf[0] |= E1000_VT_MSGTYPE_ACK; | |
7549 | ||
46b3bb9b | 7550 | /* unlocks mailbox */ |
4ae196df | 7551 | igb_write_mbx(hw, msgbuf, 1, vf); |
46b3bb9b GE |
7552 | return; |
7553 | ||
7554 | unlock: | |
7555 | igb_unlock_mbx(hw, vf); | |
f2ca0dbe | 7556 | } |
4ae196df | 7557 | |
f2ca0dbe AD |
7558 | static void igb_msg_task(struct igb_adapter *adapter) |
7559 | { | |
7560 | struct e1000_hw *hw = &adapter->hw; | |
7561 | u32 vf; | |
7562 | ||
7563 | for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { | |
7564 | /* process any reset requests */ | |
7565 | if (!igb_check_for_rst(hw, vf)) | |
7566 | igb_vf_reset_event(adapter, vf); | |
7567 | ||
7568 | /* process any messages pending */ | |
7569 | if (!igb_check_for_msg(hw, vf)) | |
7570 | igb_rcv_msg_from_vf(adapter, vf); | |
7571 | ||
7572 | /* process any acks */ | |
7573 | if (!igb_check_for_ack(hw, vf)) | |
7574 | igb_rcv_ack_from_vf(adapter, vf); | |
7575 | } | |
4ae196df AD |
7576 | } |
7577 | ||
68d480c4 AD |
7578 | /** |
7579 | * igb_set_uta - Set unicast filter table address | |
7580 | * @adapter: board private structure | |
bf456abb | 7581 | * @set: boolean indicating if we are setting or clearing bits |
68d480c4 AD |
7582 | * |
7583 | * The unicast table address is a register array of 32-bit registers. | |
7584 | * The table is meant to be used in a way similar to how the MTA is used | |
7585 | * however due to certain limitations in the hardware it is necessary to | |
25985edc LDM |
7586 | * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous |
7587 | * enable bit to allow vlan tag stripping when promiscuous mode is enabled | |
68d480c4 | 7588 | **/ |
bf456abb | 7589 | static void igb_set_uta(struct igb_adapter *adapter, bool set) |
68d480c4 AD |
7590 | { |
7591 | struct e1000_hw *hw = &adapter->hw; | |
bf456abb | 7592 | u32 uta = set ? ~0 : 0; |
68d480c4 AD |
7593 | int i; |
7594 | ||
68d480c4 AD |
7595 | /* we only need to do this if VMDq is enabled */ |
7596 | if (!adapter->vfs_allocated_count) | |
7597 | return; | |
7598 | ||
bf456abb AD |
7599 | for (i = hw->mac.uta_reg_count; i--;) |
7600 | array_wr32(E1000_UTA, i, uta); | |
68d480c4 AD |
7601 | } |
7602 | ||
9d5c8243 | 7603 | /** |
b980ac18 JK |
7604 | * igb_intr_msi - Interrupt Handler |
7605 | * @irq: interrupt number | |
7606 | * @data: pointer to a network interface device structure | |
9d5c8243 AK |
7607 | **/ |
7608 | static irqreturn_t igb_intr_msi(int irq, void *data) | |
7609 | { | |
047e0030 AD |
7610 | struct igb_adapter *adapter = data; |
7611 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | |
9d5c8243 AK |
7612 | struct e1000_hw *hw = &adapter->hw; |
7613 | /* read ICR disables interrupts using IAM */ | |
7614 | u32 icr = rd32(E1000_ICR); | |
7615 | ||
047e0030 | 7616 | igb_write_itr(q_vector); |
9d5c8243 | 7617 | |
7f081d40 AD |
7618 | if (icr & E1000_ICR_DRSTA) |
7619 | schedule_work(&adapter->reset_task); | |
7620 | ||
047e0030 | 7621 | if (icr & E1000_ICR_DOUTSYNC) { |
dda0e083 AD |
7622 | /* HW is reporting DMA is out of sync */ |
7623 | adapter->stats.doosync++; | |
7624 | } | |
7625 | ||
9d5c8243 AK |
7626 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { |
7627 | hw->mac.get_link_status = 1; | |
7628 | if (!test_bit(__IGB_DOWN, &adapter->state)) | |
7629 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
7630 | } | |
7631 | ||
61d7f75f RC |
7632 | if (icr & E1000_ICR_TS) |
7633 | igb_tsync_interrupt(adapter); | |
1f6e8178 | 7634 | |
047e0030 | 7635 | napi_schedule(&q_vector->napi); |
9d5c8243 AK |
7636 | |
7637 | return IRQ_HANDLED; | |
7638 | } | |
7639 | ||
7640 | /** | |
b980ac18 JK |
7641 | * igb_intr - Legacy Interrupt Handler |
7642 | * @irq: interrupt number | |
7643 | * @data: pointer to a network interface device structure | |
9d5c8243 AK |
7644 | **/ |
7645 | static irqreturn_t igb_intr(int irq, void *data) | |
7646 | { | |
047e0030 AD |
7647 | struct igb_adapter *adapter = data; |
7648 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | |
9d5c8243 AK |
7649 | struct e1000_hw *hw = &adapter->hw; |
7650 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | |
b980ac18 JK |
7651 | * need for the IMC write |
7652 | */ | |
9d5c8243 | 7653 | u32 icr = rd32(E1000_ICR); |
9d5c8243 AK |
7654 | |
7655 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | |
b980ac18 JK |
7656 | * not set, then the adapter didn't send an interrupt |
7657 | */ | |
9d5c8243 AK |
7658 | if (!(icr & E1000_ICR_INT_ASSERTED)) |
7659 | return IRQ_NONE; | |
7660 | ||
0ba82994 AD |
7661 | igb_write_itr(q_vector); |
7662 | ||
7f081d40 AD |
7663 | if (icr & E1000_ICR_DRSTA) |
7664 | schedule_work(&adapter->reset_task); | |
7665 | ||
047e0030 | 7666 | if (icr & E1000_ICR_DOUTSYNC) { |
dda0e083 AD |
7667 | /* HW is reporting DMA is out of sync */ |
7668 | adapter->stats.doosync++; | |
7669 | } | |
7670 | ||
9d5c8243 AK |
7671 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { |
7672 | hw->mac.get_link_status = 1; | |
7673 | /* guard against interrupt when we're going down */ | |
7674 | if (!test_bit(__IGB_DOWN, &adapter->state)) | |
7675 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
7676 | } | |
7677 | ||
61d7f75f RC |
7678 | if (icr & E1000_ICR_TS) |
7679 | igb_tsync_interrupt(adapter); | |
1f6e8178 | 7680 | |
047e0030 | 7681 | napi_schedule(&q_vector->napi); |
9d5c8243 AK |
7682 | |
7683 | return IRQ_HANDLED; | |
7684 | } | |
7685 | ||
c50b52a0 | 7686 | static void igb_ring_irq_enable(struct igb_q_vector *q_vector) |
9d5c8243 | 7687 | { |
047e0030 | 7688 | struct igb_adapter *adapter = q_vector->adapter; |
46544258 | 7689 | struct e1000_hw *hw = &adapter->hw; |
9d5c8243 | 7690 | |
0ba82994 AD |
7691 | if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || |
7692 | (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { | |
7693 | if ((adapter->num_q_vectors == 1) && !adapter->vf_data) | |
7694 | igb_set_itr(q_vector); | |
46544258 | 7695 | else |
047e0030 | 7696 | igb_update_ring_itr(q_vector); |
9d5c8243 AK |
7697 | } |
7698 | ||
46544258 | 7699 | if (!test_bit(__IGB_DOWN, &adapter->state)) { |
cd14ef54 | 7700 | if (adapter->flags & IGB_FLAG_HAS_MSIX) |
047e0030 | 7701 | wr32(E1000_EIMS, q_vector->eims_value); |
46544258 AD |
7702 | else |
7703 | igb_irq_enable(adapter); | |
7704 | } | |
9d5c8243 AK |
7705 | } |
7706 | ||
46544258 | 7707 | /** |
b980ac18 JK |
7708 | * igb_poll - NAPI Rx polling callback |
7709 | * @napi: napi polling structure | |
7710 | * @budget: count of how many packets we should handle | |
46544258 AD |
7711 | **/ |
7712 | static int igb_poll(struct napi_struct *napi, int budget) | |
9d5c8243 | 7713 | { |
047e0030 | 7714 | struct igb_q_vector *q_vector = container_of(napi, |
b980ac18 JK |
7715 | struct igb_q_vector, |
7716 | napi); | |
16eb8815 | 7717 | bool clean_complete = true; |
32b3e08f | 7718 | int work_done = 0; |
9d5c8243 | 7719 | |
421e02f0 | 7720 | #ifdef CONFIG_IGB_DCA |
047e0030 AD |
7721 | if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) |
7722 | igb_update_dca(q_vector); | |
fe4506b6 | 7723 | #endif |
0ba82994 | 7724 | if (q_vector->tx.ring) |
7f0ba845 | 7725 | clean_complete = igb_clean_tx_irq(q_vector, budget); |
9d5c8243 | 7726 | |
32b3e08f JB |
7727 | if (q_vector->rx.ring) { |
7728 | int cleaned = igb_clean_rx_irq(q_vector, budget); | |
7729 | ||
7730 | work_done += cleaned; | |
7f0ba845 AD |
7731 | if (cleaned >= budget) |
7732 | clean_complete = false; | |
32b3e08f | 7733 | } |
047e0030 | 7734 | |
16eb8815 AD |
7735 | /* If all work not completed, return budget and keep polling */ |
7736 | if (!clean_complete) | |
7737 | return budget; | |
46544258 | 7738 | |
0bcd952f JB |
7739 | /* Exit the polling mode, but don't re-enable interrupts if stack might |
7740 | * poll us due to busy-polling | |
7741 | */ | |
7742 | if (likely(napi_complete_done(napi, work_done))) | |
7743 | igb_ring_irq_enable(q_vector); | |
9d5c8243 | 7744 | |
0bcd952f | 7745 | return min(work_done, budget - 1); |
9d5c8243 | 7746 | } |
6d8126f9 | 7747 | |
9d5c8243 | 7748 | /** |
b980ac18 JK |
7749 | * igb_clean_tx_irq - Reclaim resources after transmit completes |
7750 | * @q_vector: pointer to q_vector containing needed info | |
7f0ba845 | 7751 | * @napi_budget: Used to determine if we are in netpoll |
49ce9c2c | 7752 | * |
b980ac18 | 7753 | * returns true if ring is completely cleaned |
9d5c8243 | 7754 | **/ |
7f0ba845 | 7755 | static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) |
9d5c8243 | 7756 | { |
047e0030 | 7757 | struct igb_adapter *adapter = q_vector->adapter; |
0ba82994 | 7758 | struct igb_ring *tx_ring = q_vector->tx.ring; |
06034649 | 7759 | struct igb_tx_buffer *tx_buffer; |
f4128785 | 7760 | union e1000_adv_tx_desc *tx_desc; |
9d5c8243 | 7761 | unsigned int total_bytes = 0, total_packets = 0; |
0ba82994 | 7762 | unsigned int budget = q_vector->tx.work_limit; |
8542db05 | 7763 | unsigned int i = tx_ring->next_to_clean; |
9d5c8243 | 7764 | |
13fde97a AD |
7765 | if (test_bit(__IGB_DOWN, &adapter->state)) |
7766 | return true; | |
0e014cb1 | 7767 | |
06034649 | 7768 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
13fde97a | 7769 | tx_desc = IGB_TX_DESC(tx_ring, i); |
8542db05 | 7770 | i -= tx_ring->count; |
9d5c8243 | 7771 | |
f4128785 AD |
7772 | do { |
7773 | union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; | |
8542db05 AD |
7774 | |
7775 | /* if next_to_watch is not set then there is no work pending */ | |
7776 | if (!eop_desc) | |
7777 | break; | |
13fde97a | 7778 | |
f4128785 | 7779 | /* prevent any other reads prior to eop_desc */ |
c4cb9918 | 7780 | smp_rmb(); |
f4128785 | 7781 | |
13fde97a AD |
7782 | /* if DD is not set pending work has not been completed */ |
7783 | if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) | |
7784 | break; | |
7785 | ||
8542db05 AD |
7786 | /* clear next_to_watch to prevent false hangs */ |
7787 | tx_buffer->next_to_watch = NULL; | |
9d5c8243 | 7788 | |
ebe42d16 AD |
7789 | /* update the statistics for this packet */ |
7790 | total_bytes += tx_buffer->bytecount; | |
7791 | total_packets += tx_buffer->gso_segs; | |
13fde97a | 7792 | |
ebe42d16 | 7793 | /* free the skb */ |
7f0ba845 | 7794 | napi_consume_skb(tx_buffer->skb, napi_budget); |
13fde97a | 7795 | |
ebe42d16 AD |
7796 | /* unmap skb header data */ |
7797 | dma_unmap_single(tx_ring->dev, | |
c9f14bf3 AD |
7798 | dma_unmap_addr(tx_buffer, dma), |
7799 | dma_unmap_len(tx_buffer, len), | |
ebe42d16 AD |
7800 | DMA_TO_DEVICE); |
7801 | ||
c9f14bf3 | 7802 | /* clear tx_buffer data */ |
c9f14bf3 AD |
7803 | dma_unmap_len_set(tx_buffer, len, 0); |
7804 | ||
ebe42d16 AD |
7805 | /* clear last DMA location and unmap remaining buffers */ |
7806 | while (tx_desc != eop_desc) { | |
13fde97a AD |
7807 | tx_buffer++; |
7808 | tx_desc++; | |
9d5c8243 | 7809 | i++; |
8542db05 AD |
7810 | if (unlikely(!i)) { |
7811 | i -= tx_ring->count; | |
06034649 | 7812 | tx_buffer = tx_ring->tx_buffer_info; |
13fde97a AD |
7813 | tx_desc = IGB_TX_DESC(tx_ring, 0); |
7814 | } | |
ebe42d16 AD |
7815 | |
7816 | /* unmap any remaining paged data */ | |
c9f14bf3 | 7817 | if (dma_unmap_len(tx_buffer, len)) { |
ebe42d16 | 7818 | dma_unmap_page(tx_ring->dev, |
c9f14bf3 AD |
7819 | dma_unmap_addr(tx_buffer, dma), |
7820 | dma_unmap_len(tx_buffer, len), | |
ebe42d16 | 7821 | DMA_TO_DEVICE); |
c9f14bf3 | 7822 | dma_unmap_len_set(tx_buffer, len, 0); |
ebe42d16 AD |
7823 | } |
7824 | } | |
7825 | ||
ebe42d16 AD |
7826 | /* move us one more past the eop_desc for start of next pkt */ |
7827 | tx_buffer++; | |
7828 | tx_desc++; | |
7829 | i++; | |
7830 | if (unlikely(!i)) { | |
7831 | i -= tx_ring->count; | |
7832 | tx_buffer = tx_ring->tx_buffer_info; | |
7833 | tx_desc = IGB_TX_DESC(tx_ring, 0); | |
7834 | } | |
f4128785 AD |
7835 | |
7836 | /* issue prefetch for next Tx descriptor */ | |
7837 | prefetch(tx_desc); | |
7838 | ||
7839 | /* update budget accounting */ | |
7840 | budget--; | |
7841 | } while (likely(budget)); | |
0e014cb1 | 7842 | |
bdbc0631 ED |
7843 | netdev_tx_completed_queue(txring_txq(tx_ring), |
7844 | total_packets, total_bytes); | |
8542db05 | 7845 | i += tx_ring->count; |
9d5c8243 | 7846 | tx_ring->next_to_clean = i; |
13fde97a AD |
7847 | u64_stats_update_begin(&tx_ring->tx_syncp); |
7848 | tx_ring->tx_stats.bytes += total_bytes; | |
7849 | tx_ring->tx_stats.packets += total_packets; | |
7850 | u64_stats_update_end(&tx_ring->tx_syncp); | |
0ba82994 AD |
7851 | q_vector->tx.total_bytes += total_bytes; |
7852 | q_vector->tx.total_packets += total_packets; | |
9d5c8243 | 7853 | |
6d095fa8 | 7854 | if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { |
13fde97a | 7855 | struct e1000_hw *hw = &adapter->hw; |
12dcd86b | 7856 | |
9d5c8243 | 7857 | /* Detect a transmit hang in hardware, this serializes the |
b980ac18 JK |
7858 | * check with the clearing of time_stamp and movement of i |
7859 | */ | |
6d095fa8 | 7860 | clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); |
f4128785 | 7861 | if (tx_buffer->next_to_watch && |
8542db05 | 7862 | time_after(jiffies, tx_buffer->time_stamp + |
8e95a202 JP |
7863 | (adapter->tx_timeout_factor * HZ)) && |
7864 | !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { | |
9d5c8243 | 7865 | |
9d5c8243 | 7866 | /* detected Tx unit hang */ |
59d71989 | 7867 | dev_err(tx_ring->dev, |
9d5c8243 | 7868 | "Detected Tx Unit Hang\n" |
2d064c06 | 7869 | " Tx Queue <%d>\n" |
9d5c8243 AK |
7870 | " TDH <%x>\n" |
7871 | " TDT <%x>\n" | |
7872 | " next_to_use <%x>\n" | |
7873 | " next_to_clean <%x>\n" | |
9d5c8243 AK |
7874 | "buffer_info[next_to_clean]\n" |
7875 | " time_stamp <%lx>\n" | |
8542db05 | 7876 | " next_to_watch <%p>\n" |
9d5c8243 AK |
7877 | " jiffies <%lx>\n" |
7878 | " desc.status <%x>\n", | |
2d064c06 | 7879 | tx_ring->queue_index, |
238ac817 | 7880 | rd32(E1000_TDH(tx_ring->reg_idx)), |
fce99e34 | 7881 | readl(tx_ring->tail), |
9d5c8243 AK |
7882 | tx_ring->next_to_use, |
7883 | tx_ring->next_to_clean, | |
8542db05 | 7884 | tx_buffer->time_stamp, |
f4128785 | 7885 | tx_buffer->next_to_watch, |
9d5c8243 | 7886 | jiffies, |
f4128785 | 7887 | tx_buffer->next_to_watch->wb.status); |
13fde97a AD |
7888 | netif_stop_subqueue(tx_ring->netdev, |
7889 | tx_ring->queue_index); | |
7890 | ||
7891 | /* we are about to reset, no point in enabling stuff */ | |
7892 | return true; | |
9d5c8243 AK |
7893 | } |
7894 | } | |
13fde97a | 7895 | |
21ba6fe1 | 7896 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
13fde97a | 7897 | if (unlikely(total_packets && |
b980ac18 JK |
7898 | netif_carrier_ok(tx_ring->netdev) && |
7899 | igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { | |
13fde97a AD |
7900 | /* Make sure that anybody stopping the queue after this |
7901 | * sees the new next_to_clean. | |
7902 | */ | |
7903 | smp_mb(); | |
7904 | if (__netif_subqueue_stopped(tx_ring->netdev, | |
7905 | tx_ring->queue_index) && | |
7906 | !(test_bit(__IGB_DOWN, &adapter->state))) { | |
7907 | netif_wake_subqueue(tx_ring->netdev, | |
7908 | tx_ring->queue_index); | |
7909 | ||
7910 | u64_stats_update_begin(&tx_ring->tx_syncp); | |
7911 | tx_ring->tx_stats.restart_queue++; | |
7912 | u64_stats_update_end(&tx_ring->tx_syncp); | |
7913 | } | |
7914 | } | |
7915 | ||
7916 | return !!budget; | |
9d5c8243 AK |
7917 | } |
7918 | ||
cbc8e55f | 7919 | /** |
b980ac18 JK |
7920 | * igb_reuse_rx_page - page flip buffer and store it back on the ring |
7921 | * @rx_ring: rx descriptor ring to store buffers on | |
7922 | * @old_buff: donor buffer to have page reused | |
cbc8e55f | 7923 | * |
b980ac18 | 7924 | * Synchronizes page for reuse by the adapter |
cbc8e55f AD |
7925 | **/ |
7926 | static void igb_reuse_rx_page(struct igb_ring *rx_ring, | |
7927 | struct igb_rx_buffer *old_buff) | |
7928 | { | |
7929 | struct igb_rx_buffer *new_buff; | |
7930 | u16 nta = rx_ring->next_to_alloc; | |
7931 | ||
7932 | new_buff = &rx_ring->rx_buffer_info[nta]; | |
7933 | ||
7934 | /* update, and store next to alloc */ | |
7935 | nta++; | |
7936 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | |
7937 | ||
e0142726 AD |
7938 | /* Transfer page from old buffer to new buffer. |
7939 | * Move each member individually to avoid possible store | |
7940 | * forwarding stalls. | |
7941 | */ | |
7942 | new_buff->dma = old_buff->dma; | |
7943 | new_buff->page = old_buff->page; | |
7944 | new_buff->page_offset = old_buff->page_offset; | |
7945 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; | |
cbc8e55f AD |
7946 | } |
7947 | ||
95dd44b4 AD |
7948 | static inline bool igb_page_is_reserved(struct page *page) |
7949 | { | |
2f064f34 | 7950 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
95dd44b4 AD |
7951 | } |
7952 | ||
e0142726 | 7953 | static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) |
74e238ea | 7954 | { |
e0142726 AD |
7955 | unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; |
7956 | struct page *page = rx_buffer->page; | |
bd4171a5 | 7957 | |
74e238ea | 7958 | /* avoid re-using remote pages */ |
95dd44b4 | 7959 | if (unlikely(igb_page_is_reserved(page))) |
bc16e47f RG |
7960 | return false; |
7961 | ||
74e238ea AD |
7962 | #if (PAGE_SIZE < 8192) |
7963 | /* if we are only owner of page we can reuse it */ | |
e0142726 | 7964 | if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) |
74e238ea | 7965 | return false; |
74e238ea | 7966 | #else |
8649aaef AD |
7967 | #define IGB_LAST_OFFSET \ |
7968 | (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) | |
74e238ea | 7969 | |
8649aaef | 7970 | if (rx_buffer->page_offset > IGB_LAST_OFFSET) |
74e238ea | 7971 | return false; |
74e238ea AD |
7972 | #endif |
7973 | ||
bd4171a5 AD |
7974 | /* If we have drained the page fragment pool we need to update |
7975 | * the pagecnt_bias and page count so that we fully restock the | |
7976 | * number of references the driver holds. | |
95dd44b4 | 7977 | */ |
e0142726 | 7978 | if (unlikely(!pagecnt_bias)) { |
bd4171a5 AD |
7979 | page_ref_add(page, USHRT_MAX); |
7980 | rx_buffer->pagecnt_bias = USHRT_MAX; | |
7981 | } | |
95dd44b4 | 7982 | |
74e238ea AD |
7983 | return true; |
7984 | } | |
7985 | ||
cbc8e55f | 7986 | /** |
b980ac18 JK |
7987 | * igb_add_rx_frag - Add contents of Rx buffer to sk_buff |
7988 | * @rx_ring: rx descriptor ring to transact packets on | |
7989 | * @rx_buffer: buffer containing page to add | |
b980ac18 | 7990 | * @skb: sk_buff to place the data into |
e0142726 | 7991 | * @size: size of buffer to be added |
cbc8e55f | 7992 | * |
b980ac18 | 7993 | * This function will add the data contained in rx_buffer->page to the skb. |
cbc8e55f | 7994 | **/ |
e0142726 | 7995 | static void igb_add_rx_frag(struct igb_ring *rx_ring, |
cbc8e55f | 7996 | struct igb_rx_buffer *rx_buffer, |
e0142726 AD |
7997 | struct sk_buff *skb, |
7998 | unsigned int size) | |
cbc8e55f | 7999 | { |
74e238ea | 8000 | #if (PAGE_SIZE < 8192) |
8649aaef | 8001 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; |
74e238ea | 8002 | #else |
e3cdf68d AD |
8003 | unsigned int truesize = ring_uses_build_skb(rx_ring) ? |
8004 | SKB_DATA_ALIGN(IGB_SKB_PAD + size) : | |
8005 | SKB_DATA_ALIGN(size); | |
74e238ea | 8006 | #endif |
e0142726 AD |
8007 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, |
8008 | rx_buffer->page_offset, size, truesize); | |
8009 | #if (PAGE_SIZE < 8192) | |
8010 | rx_buffer->page_offset ^= truesize; | |
8011 | #else | |
8012 | rx_buffer->page_offset += truesize; | |
8013 | #endif | |
8014 | } | |
8015 | ||
8016 | static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, | |
8017 | struct igb_rx_buffer *rx_buffer, | |
8018 | union e1000_adv_rx_desc *rx_desc, | |
8019 | unsigned int size) | |
8020 | { | |
8021 | void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; | |
8022 | #if (PAGE_SIZE < 8192) | |
8023 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; | |
8024 | #else | |
8025 | unsigned int truesize = SKB_DATA_ALIGN(size); | |
8026 | #endif | |
8027 | unsigned int headlen; | |
8028 | struct sk_buff *skb; | |
cbc8e55f | 8029 | |
e0142726 AD |
8030 | /* prefetch first cache line of first page */ |
8031 | prefetch(va); | |
8032 | #if L1_CACHE_BYTES < 128 | |
8033 | prefetch(va + L1_CACHE_BYTES); | |
8034 | #endif | |
8035 | ||
8036 | /* allocate a skb to store the frags */ | |
8037 | skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); | |
8038 | if (unlikely(!skb)) | |
8039 | return NULL; | |
cbc8e55f | 8040 | |
f56e7bba AD |
8041 | if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { |
8042 | igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); | |
8043 | va += IGB_TS_HDR_LEN; | |
8044 | size -= IGB_TS_HDR_LEN; | |
8045 | } | |
cbc8e55f | 8046 | |
e0142726 AD |
8047 | /* Determine available headroom for copy */ |
8048 | headlen = size; | |
8049 | if (headlen > IGB_RX_HDR_LEN) | |
c43f1255 | 8050 | headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN); |
f56e7bba AD |
8051 | |
8052 | /* align pull length to size of long to optimize memcpy performance */ | |
e0142726 | 8053 | memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); |
f56e7bba AD |
8054 | |
8055 | /* update all of the pointers */ | |
e0142726 AD |
8056 | size -= headlen; |
8057 | if (size) { | |
8058 | skb_add_rx_frag(skb, 0, rx_buffer->page, | |
8059 | (va + headlen) - page_address(rx_buffer->page), | |
8060 | size, truesize); | |
8061 | #if (PAGE_SIZE < 8192) | |
8062 | rx_buffer->page_offset ^= truesize; | |
8063 | #else | |
8064 | rx_buffer->page_offset += truesize; | |
2e334eee | 8065 | #endif |
2e334eee | 8066 | } else { |
e0142726 | 8067 | rx_buffer->pagecnt_bias++; |
2e334eee AD |
8068 | } |
8069 | ||
2e334eee AD |
8070 | return skb; |
8071 | } | |
8072 | ||
b1bb2eb0 AD |
8073 | static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, |
8074 | struct igb_rx_buffer *rx_buffer, | |
8075 | union e1000_adv_rx_desc *rx_desc, | |
8076 | unsigned int size) | |
8077 | { | |
8078 | void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; | |
8079 | #if (PAGE_SIZE < 8192) | |
8080 | unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; | |
8081 | #else | |
8082 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + | |
8083 | SKB_DATA_ALIGN(IGB_SKB_PAD + size); | |
8084 | #endif | |
8085 | struct sk_buff *skb; | |
8086 | ||
8087 | /* prefetch first cache line of first page */ | |
8088 | prefetch(va); | |
8089 | #if L1_CACHE_BYTES < 128 | |
8090 | prefetch(va + L1_CACHE_BYTES); | |
8091 | #endif | |
8092 | ||
3a1eb6d1 | 8093 | /* build an skb around the page buffer */ |
b1bb2eb0 AD |
8094 | skb = build_skb(va - IGB_SKB_PAD, truesize); |
8095 | if (unlikely(!skb)) | |
8096 | return NULL; | |
8097 | ||
8098 | /* update pointers within the skb to store the data */ | |
8099 | skb_reserve(skb, IGB_SKB_PAD); | |
8100 | __skb_put(skb, size); | |
8101 | ||
8102 | /* pull timestamp out of packet data */ | |
8103 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { | |
8104 | igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); | |
8105 | __skb_pull(skb, IGB_TS_HDR_LEN); | |
8106 | } | |
8107 | ||
8108 | /* update buffer offset */ | |
8109 | #if (PAGE_SIZE < 8192) | |
8110 | rx_buffer->page_offset ^= truesize; | |
8111 | #else | |
8112 | rx_buffer->page_offset += truesize; | |
8113 | #endif | |
8114 | ||
8115 | return skb; | |
8116 | } | |
8117 | ||
cd392f5c | 8118 | static inline void igb_rx_checksum(struct igb_ring *ring, |
3ceb90fd AD |
8119 | union e1000_adv_rx_desc *rx_desc, |
8120 | struct sk_buff *skb) | |
9d5c8243 | 8121 | { |
bc8acf2c | 8122 | skb_checksum_none_assert(skb); |
9d5c8243 | 8123 | |
294e7d78 | 8124 | /* Ignore Checksum bit is set */ |
3ceb90fd | 8125 | if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) |
294e7d78 AD |
8126 | return; |
8127 | ||
8128 | /* Rx checksum disabled via ethtool */ | |
8129 | if (!(ring->netdev->features & NETIF_F_RXCSUM)) | |
9d5c8243 | 8130 | return; |
85ad76b2 | 8131 | |
9d5c8243 | 8132 | /* TCP/UDP checksum error bit is set */ |
3ceb90fd AD |
8133 | if (igb_test_staterr(rx_desc, |
8134 | E1000_RXDEXT_STATERR_TCPE | | |
8135 | E1000_RXDEXT_STATERR_IPE)) { | |
b980ac18 | 8136 | /* work around errata with sctp packets where the TCPE aka |
b9473560 JB |
8137 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) |
8138 | * packets, (aka let the stack check the crc32c) | |
8139 | */ | |
866cff06 AD |
8140 | if (!((skb->len == 60) && |
8141 | test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { | |
12dcd86b | 8142 | u64_stats_update_begin(&ring->rx_syncp); |
04a5fcaa | 8143 | ring->rx_stats.csum_err++; |
12dcd86b ED |
8144 | u64_stats_update_end(&ring->rx_syncp); |
8145 | } | |
9d5c8243 | 8146 | /* let the stack verify checksum errors */ |
9d5c8243 AK |
8147 | return; |
8148 | } | |
8149 | /* It must be a TCP or UDP packet with a valid checksum */ | |
3ceb90fd AD |
8150 | if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | |
8151 | E1000_RXD_STAT_UDPCS)) | |
9d5c8243 AK |
8152 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
8153 | ||
3ceb90fd AD |
8154 | dev_dbg(ring->dev, "cksum success: bits %08X\n", |
8155 | le32_to_cpu(rx_desc->wb.upper.status_error)); | |
9d5c8243 AK |
8156 | } |
8157 | ||
077887c3 AD |
8158 | static inline void igb_rx_hash(struct igb_ring *ring, |
8159 | union e1000_adv_rx_desc *rx_desc, | |
8160 | struct sk_buff *skb) | |
8161 | { | |
8162 | if (ring->netdev->features & NETIF_F_RXHASH) | |
42bdf083 TH |
8163 | skb_set_hash(skb, |
8164 | le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), | |
8165 | PKT_HASH_TYPE_L3); | |
077887c3 AD |
8166 | } |
8167 | ||
2e334eee | 8168 | /** |
b980ac18 JK |
8169 | * igb_is_non_eop - process handling of non-EOP buffers |
8170 | * @rx_ring: Rx ring being processed | |
8171 | * @rx_desc: Rx descriptor for current buffer | |
8172 | * @skb: current socket buffer containing buffer in progress | |
2e334eee | 8173 | * |
b980ac18 JK |
8174 | * This function updates next to clean. If the buffer is an EOP buffer |
8175 | * this function exits returning false, otherwise it will place the | |
8176 | * sk_buff in the next buffer to be chained and return true indicating | |
8177 | * that this is in fact a non-EOP buffer. | |
2e334eee AD |
8178 | **/ |
8179 | static bool igb_is_non_eop(struct igb_ring *rx_ring, | |
8180 | union e1000_adv_rx_desc *rx_desc) | |
8181 | { | |
8182 | u32 ntc = rx_ring->next_to_clean + 1; | |
8183 | ||
8184 | /* fetch, update, and store next to clean */ | |
8185 | ntc = (ntc < rx_ring->count) ? ntc : 0; | |
8186 | rx_ring->next_to_clean = ntc; | |
8187 | ||
8188 | prefetch(IGB_RX_DESC(rx_ring, ntc)); | |
8189 | ||
8190 | if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) | |
8191 | return false; | |
8192 | ||
8193 | return true; | |
8194 | } | |
8195 | ||
1a1c225b | 8196 | /** |
b980ac18 JK |
8197 | * igb_cleanup_headers - Correct corrupted or empty headers |
8198 | * @rx_ring: rx descriptor ring packet is being transacted on | |
8199 | * @rx_desc: pointer to the EOP Rx descriptor | |
8200 | * @skb: pointer to current skb being fixed | |
1a1c225b | 8201 | * |
b980ac18 JK |
8202 | * Address the case where we are pulling data in on pages only |
8203 | * and as such no data is present in the skb header. | |
1a1c225b | 8204 | * |
b980ac18 JK |
8205 | * In addition if skb is not at least 60 bytes we need to pad it so that |
8206 | * it is large enough to qualify as a valid Ethernet frame. | |
1a1c225b | 8207 | * |
b980ac18 | 8208 | * Returns true if an error was encountered and skb was freed. |
1a1c225b AD |
8209 | **/ |
8210 | static bool igb_cleanup_headers(struct igb_ring *rx_ring, | |
8211 | union e1000_adv_rx_desc *rx_desc, | |
8212 | struct sk_buff *skb) | |
8213 | { | |
1a1c225b AD |
8214 | if (unlikely((igb_test_staterr(rx_desc, |
8215 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { | |
8216 | struct net_device *netdev = rx_ring->netdev; | |
8217 | if (!(netdev->features & NETIF_F_RXALL)) { | |
8218 | dev_kfree_skb_any(skb); | |
8219 | return true; | |
8220 | } | |
8221 | } | |
8222 | ||
a94d9e22 AD |
8223 | /* if eth_skb_pad returns an error the skb was freed */ |
8224 | if (eth_skb_pad(skb)) | |
8225 | return true; | |
1a1c225b AD |
8226 | |
8227 | return false; | |
2d94d8ab AD |
8228 | } |
8229 | ||
db2ee5bd | 8230 | /** |
b980ac18 JK |
8231 | * igb_process_skb_fields - Populate skb header fields from Rx descriptor |
8232 | * @rx_ring: rx descriptor ring packet is being transacted on | |
8233 | * @rx_desc: pointer to the EOP Rx descriptor | |
8234 | * @skb: pointer to current skb being populated | |
db2ee5bd | 8235 | * |
b980ac18 JK |
8236 | * This function checks the ring, descriptor, and packet information in |
8237 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and | |
8238 | * other fields within the skb. | |
db2ee5bd AD |
8239 | **/ |
8240 | static void igb_process_skb_fields(struct igb_ring *rx_ring, | |
8241 | union e1000_adv_rx_desc *rx_desc, | |
8242 | struct sk_buff *skb) | |
8243 | { | |
8244 | struct net_device *dev = rx_ring->netdev; | |
8245 | ||
8246 | igb_rx_hash(rx_ring, rx_desc, skb); | |
8247 | ||
8248 | igb_rx_checksum(rx_ring, rx_desc, skb); | |
8249 | ||
5499a968 JK |
8250 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && |
8251 | !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) | |
8252 | igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); | |
db2ee5bd | 8253 | |
f646968f | 8254 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
db2ee5bd AD |
8255 | igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { |
8256 | u16 vid; | |
9005df38 | 8257 | |
db2ee5bd AD |
8258 | if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && |
8259 | test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) | |
8260 | vid = be16_to_cpu(rx_desc->wb.upper.vlan); | |
8261 | else | |
8262 | vid = le16_to_cpu(rx_desc->wb.upper.vlan); | |
8263 | ||
86a9bad3 | 8264 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
db2ee5bd AD |
8265 | } |
8266 | ||
8267 | skb_record_rx_queue(skb, rx_ring->queue_index); | |
8268 | ||
8269 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | |
8270 | } | |
8271 | ||
e0142726 AD |
8272 | static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, |
8273 | const unsigned int size) | |
8274 | { | |
8275 | struct igb_rx_buffer *rx_buffer; | |
8276 | ||
8277 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; | |
8278 | prefetchw(rx_buffer->page); | |
8279 | ||
8280 | /* we are reusing so sync this buffer for CPU use */ | |
8281 | dma_sync_single_range_for_cpu(rx_ring->dev, | |
8282 | rx_buffer->dma, | |
8283 | rx_buffer->page_offset, | |
8284 | size, | |
8285 | DMA_FROM_DEVICE); | |
8286 | ||
8287 | rx_buffer->pagecnt_bias--; | |
8288 | ||
8289 | return rx_buffer; | |
8290 | } | |
8291 | ||
8292 | static void igb_put_rx_buffer(struct igb_ring *rx_ring, | |
8293 | struct igb_rx_buffer *rx_buffer) | |
8294 | { | |
8295 | if (igb_can_reuse_rx_page(rx_buffer)) { | |
8296 | /* hand second half of page back to the ring */ | |
8297 | igb_reuse_rx_page(rx_ring, rx_buffer); | |
8298 | } else { | |
8299 | /* We are not reusing the buffer so unmap it and free | |
8300 | * any references we are holding to it | |
8301 | */ | |
8302 | dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, | |
8303 | igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, | |
8304 | IGB_RX_DMA_ATTR); | |
8305 | __page_frag_cache_drain(rx_buffer->page, | |
8306 | rx_buffer->pagecnt_bias); | |
8307 | } | |
8308 | ||
8309 | /* clear contents of rx_buffer */ | |
8310 | rx_buffer->page = NULL; | |
8311 | } | |
8312 | ||
32b3e08f | 8313 | static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) |
9d5c8243 | 8314 | { |
0ba82994 | 8315 | struct igb_ring *rx_ring = q_vector->rx.ring; |
1a1c225b | 8316 | struct sk_buff *skb = rx_ring->skb; |
9d5c8243 | 8317 | unsigned int total_bytes = 0, total_packets = 0; |
16eb8815 | 8318 | u16 cleaned_count = igb_desc_unused(rx_ring); |
9d5c8243 | 8319 | |
57ba34c9 | 8320 | while (likely(total_packets < budget)) { |
2e334eee | 8321 | union e1000_adv_rx_desc *rx_desc; |
e0142726 AD |
8322 | struct igb_rx_buffer *rx_buffer; |
8323 | unsigned int size; | |
bf36c1a0 | 8324 | |
2e334eee AD |
8325 | /* return some buffers to hardware, one at a time is too slow */ |
8326 | if (cleaned_count >= IGB_RX_BUFFER_WRITE) { | |
8327 | igb_alloc_rx_buffers(rx_ring, cleaned_count); | |
8328 | cleaned_count = 0; | |
8329 | } | |
bf36c1a0 | 8330 | |
2e334eee | 8331 | rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); |
e0142726 AD |
8332 | size = le16_to_cpu(rx_desc->wb.upper.length); |
8333 | if (!size) | |
2e334eee | 8334 | break; |
9d5c8243 | 8335 | |
74e238ea AD |
8336 | /* This memory barrier is needed to keep us from reading |
8337 | * any other fields out of the rx_desc until we know the | |
124b74c1 | 8338 | * descriptor has been written back |
74e238ea | 8339 | */ |
124b74c1 | 8340 | dma_rmb(); |
74e238ea | 8341 | |
e0142726 AD |
8342 | rx_buffer = igb_get_rx_buffer(rx_ring, size); |
8343 | ||
2e334eee | 8344 | /* retrieve a buffer from the ring */ |
e0142726 AD |
8345 | if (skb) |
8346 | igb_add_rx_frag(rx_ring, rx_buffer, skb, size); | |
b1bb2eb0 AD |
8347 | else if (ring_uses_build_skb(rx_ring)) |
8348 | skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); | |
e0142726 AD |
8349 | else |
8350 | skb = igb_construct_skb(rx_ring, rx_buffer, | |
8351 | rx_desc, size); | |
9d5c8243 | 8352 | |
2e334eee | 8353 | /* exit if we failed to retrieve a buffer */ |
e0142726 AD |
8354 | if (!skb) { |
8355 | rx_ring->rx_stats.alloc_failed++; | |
8356 | rx_buffer->pagecnt_bias++; | |
2e334eee | 8357 | break; |
e0142726 | 8358 | } |
1a1c225b | 8359 | |
e0142726 | 8360 | igb_put_rx_buffer(rx_ring, rx_buffer); |
2e334eee | 8361 | cleaned_count++; |
1a1c225b | 8362 | |
2e334eee AD |
8363 | /* fetch next buffer in frame if non-eop */ |
8364 | if (igb_is_non_eop(rx_ring, rx_desc)) | |
8365 | continue; | |
1a1c225b AD |
8366 | |
8367 | /* verify the packet layout is correct */ | |
8368 | if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { | |
8369 | skb = NULL; | |
8370 | continue; | |
9d5c8243 | 8371 | } |
9d5c8243 | 8372 | |
db2ee5bd | 8373 | /* probably a little skewed due to removing CRC */ |
3ceb90fd | 8374 | total_bytes += skb->len; |
3ceb90fd | 8375 | |
db2ee5bd AD |
8376 | /* populate checksum, timestamp, VLAN, and protocol */ |
8377 | igb_process_skb_fields(rx_ring, rx_desc, skb); | |
3ceb90fd | 8378 | |
b2cb09b1 | 8379 | napi_gro_receive(&q_vector->napi, skb); |
9d5c8243 | 8380 | |
1a1c225b AD |
8381 | /* reset skb pointer */ |
8382 | skb = NULL; | |
8383 | ||
2e334eee AD |
8384 | /* update budget accounting */ |
8385 | total_packets++; | |
57ba34c9 | 8386 | } |
bf36c1a0 | 8387 | |
1a1c225b AD |
8388 | /* place incomplete frames back on ring for completion */ |
8389 | rx_ring->skb = skb; | |
8390 | ||
12dcd86b | 8391 | u64_stats_update_begin(&rx_ring->rx_syncp); |
9d5c8243 AK |
8392 | rx_ring->rx_stats.packets += total_packets; |
8393 | rx_ring->rx_stats.bytes += total_bytes; | |
12dcd86b | 8394 | u64_stats_update_end(&rx_ring->rx_syncp); |
0ba82994 AD |
8395 | q_vector->rx.total_packets += total_packets; |
8396 | q_vector->rx.total_bytes += total_bytes; | |
c023cd88 AD |
8397 | |
8398 | if (cleaned_count) | |
cd392f5c | 8399 | igb_alloc_rx_buffers(rx_ring, cleaned_count); |
c023cd88 | 8400 | |
32b3e08f | 8401 | return total_packets; |
9d5c8243 AK |
8402 | } |
8403 | ||
e3cdf68d AD |
8404 | static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) |
8405 | { | |
8406 | return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; | |
8407 | } | |
8408 | ||
c023cd88 | 8409 | static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, |
06034649 | 8410 | struct igb_rx_buffer *bi) |
c023cd88 AD |
8411 | { |
8412 | struct page *page = bi->page; | |
cbc8e55f | 8413 | dma_addr_t dma; |
c023cd88 | 8414 | |
cbc8e55f AD |
8415 | /* since we are recycling buffers we should seldom need to alloc */ |
8416 | if (likely(page)) | |
c023cd88 AD |
8417 | return true; |
8418 | ||
cbc8e55f | 8419 | /* alloc new page for storage */ |
8649aaef | 8420 | page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); |
cbc8e55f AD |
8421 | if (unlikely(!page)) { |
8422 | rx_ring->rx_stats.alloc_failed++; | |
8423 | return false; | |
c023cd88 AD |
8424 | } |
8425 | ||
cbc8e55f | 8426 | /* map page for use */ |
8649aaef AD |
8427 | dma = dma_map_page_attrs(rx_ring->dev, page, 0, |
8428 | igb_rx_pg_size(rx_ring), | |
8429 | DMA_FROM_DEVICE, | |
8430 | IGB_RX_DMA_ATTR); | |
c023cd88 | 8431 | |
b980ac18 | 8432 | /* if mapping failed free memory back to system since |
cbc8e55f AD |
8433 | * there isn't much point in holding memory we can't use |
8434 | */ | |
1a1c225b | 8435 | if (dma_mapping_error(rx_ring->dev, dma)) { |
8649aaef | 8436 | __free_pages(page, igb_rx_pg_order(rx_ring)); |
cbc8e55f | 8437 | |
c023cd88 AD |
8438 | rx_ring->rx_stats.alloc_failed++; |
8439 | return false; | |
8440 | } | |
8441 | ||
1a1c225b | 8442 | bi->dma = dma; |
cbc8e55f | 8443 | bi->page = page; |
e3cdf68d | 8444 | bi->page_offset = igb_rx_offset(rx_ring); |
bd4171a5 | 8445 | bi->pagecnt_bias = 1; |
1a1c225b | 8446 | |
c023cd88 AD |
8447 | return true; |
8448 | } | |
8449 | ||
9d5c8243 | 8450 | /** |
b980ac18 JK |
8451 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
8452 | * @adapter: address of board private structure | |
9d5c8243 | 8453 | **/ |
cd392f5c | 8454 | void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) |
9d5c8243 | 8455 | { |
9d5c8243 | 8456 | union e1000_adv_rx_desc *rx_desc; |
06034649 | 8457 | struct igb_rx_buffer *bi; |
c023cd88 | 8458 | u16 i = rx_ring->next_to_use; |
8649aaef | 8459 | u16 bufsz; |
9d5c8243 | 8460 | |
cbc8e55f AD |
8461 | /* nothing to do */ |
8462 | if (!cleaned_count) | |
8463 | return; | |
8464 | ||
60136906 | 8465 | rx_desc = IGB_RX_DESC(rx_ring, i); |
06034649 | 8466 | bi = &rx_ring->rx_buffer_info[i]; |
c023cd88 | 8467 | i -= rx_ring->count; |
9d5c8243 | 8468 | |
8649aaef AD |
8469 | bufsz = igb_rx_bufsz(rx_ring); |
8470 | ||
cbc8e55f | 8471 | do { |
1a1c225b | 8472 | if (!igb_alloc_mapped_page(rx_ring, bi)) |
c023cd88 | 8473 | break; |
9d5c8243 | 8474 | |
5be59554 AD |
8475 | /* sync the buffer for use by the device */ |
8476 | dma_sync_single_range_for_device(rx_ring->dev, bi->dma, | |
8649aaef | 8477 | bi->page_offset, bufsz, |
5be59554 AD |
8478 | DMA_FROM_DEVICE); |
8479 | ||
b980ac18 | 8480 | /* Refresh the desc even if buffer_addrs didn't change |
cbc8e55f AD |
8481 | * because each write-back erases this info. |
8482 | */ | |
f9d40f6a | 8483 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); |
9d5c8243 | 8484 | |
c023cd88 AD |
8485 | rx_desc++; |
8486 | bi++; | |
9d5c8243 | 8487 | i++; |
c023cd88 | 8488 | if (unlikely(!i)) { |
60136906 | 8489 | rx_desc = IGB_RX_DESC(rx_ring, 0); |
06034649 | 8490 | bi = rx_ring->rx_buffer_info; |
c023cd88 AD |
8491 | i -= rx_ring->count; |
8492 | } | |
8493 | ||
7ec0116c AD |
8494 | /* clear the length for the next_to_use descriptor */ |
8495 | rx_desc->wb.upper.length = 0; | |
cbc8e55f AD |
8496 | |
8497 | cleaned_count--; | |
8498 | } while (cleaned_count); | |
9d5c8243 | 8499 | |
c023cd88 AD |
8500 | i += rx_ring->count; |
8501 | ||
9d5c8243 | 8502 | if (rx_ring->next_to_use != i) { |
cbc8e55f | 8503 | /* record the next descriptor to use */ |
9d5c8243 | 8504 | rx_ring->next_to_use = i; |
9d5c8243 | 8505 | |
cbc8e55f AD |
8506 | /* update next to alloc since we have filled the ring */ |
8507 | rx_ring->next_to_alloc = i; | |
8508 | ||
b980ac18 | 8509 | /* Force memory writes to complete before letting h/w |
9d5c8243 AK |
8510 | * know there are new descriptors to fetch. (Only |
8511 | * applicable for weak-ordered memory model archs, | |
cbc8e55f AD |
8512 | * such as IA-64). |
8513 | */ | |
73017f4e | 8514 | dma_wmb(); |
fce99e34 | 8515 | writel(i, rx_ring->tail); |
9d5c8243 AK |
8516 | } |
8517 | } | |
8518 | ||
8519 | /** | |
8520 | * igb_mii_ioctl - | |
8521 | * @netdev: | |
8522 | * @ifreq: | |
8523 | * @cmd: | |
8524 | **/ | |
8525 | static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
8526 | { | |
8527 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8528 | struct mii_ioctl_data *data = if_mii(ifr); | |
8529 | ||
8530 | if (adapter->hw.phy.media_type != e1000_media_type_copper) | |
8531 | return -EOPNOTSUPP; | |
8532 | ||
8533 | switch (cmd) { | |
8534 | case SIOCGMIIPHY: | |
8535 | data->phy_id = adapter->hw.phy.addr; | |
8536 | break; | |
8537 | case SIOCGMIIREG: | |
f5f4cf08 | 8538 | if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, |
9005df38 | 8539 | &data->val_out)) |
9d5c8243 AK |
8540 | return -EIO; |
8541 | break; | |
8542 | case SIOCSMIIREG: | |
8543 | default: | |
8544 | return -EOPNOTSUPP; | |
8545 | } | |
8546 | return 0; | |
8547 | } | |
8548 | ||
8549 | /** | |
8550 | * igb_ioctl - | |
8551 | * @netdev: | |
8552 | * @ifreq: | |
8553 | * @cmd: | |
8554 | **/ | |
8555 | static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
8556 | { | |
8557 | switch (cmd) { | |
8558 | case SIOCGMIIPHY: | |
8559 | case SIOCGMIIREG: | |
8560 | case SIOCSMIIREG: | |
8561 | return igb_mii_ioctl(netdev, ifr, cmd); | |
6ab5f7b2 JK |
8562 | case SIOCGHWTSTAMP: |
8563 | return igb_ptp_get_ts_config(netdev, ifr); | |
c6cb090b | 8564 | case SIOCSHWTSTAMP: |
6ab5f7b2 | 8565 | return igb_ptp_set_ts_config(netdev, ifr); |
9d5c8243 AK |
8566 | default: |
8567 | return -EOPNOTSUPP; | |
8568 | } | |
8569 | } | |
8570 | ||
94826487 TF |
8571 | void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) |
8572 | { | |
8573 | struct igb_adapter *adapter = hw->back; | |
8574 | ||
8575 | pci_read_config_word(adapter->pdev, reg, value); | |
8576 | } | |
8577 | ||
8578 | void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) | |
8579 | { | |
8580 | struct igb_adapter *adapter = hw->back; | |
8581 | ||
8582 | pci_write_config_word(adapter->pdev, reg, *value); | |
8583 | } | |
8584 | ||
009bc06e AD |
8585 | s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) |
8586 | { | |
8587 | struct igb_adapter *adapter = hw->back; | |
009bc06e | 8588 | |
23d028cc | 8589 | if (pcie_capability_read_word(adapter->pdev, reg, value)) |
009bc06e AD |
8590 | return -E1000_ERR_CONFIG; |
8591 | ||
009bc06e AD |
8592 | return 0; |
8593 | } | |
8594 | ||
8595 | s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) | |
8596 | { | |
8597 | struct igb_adapter *adapter = hw->back; | |
009bc06e | 8598 | |
23d028cc | 8599 | if (pcie_capability_write_word(adapter->pdev, reg, *value)) |
009bc06e AD |
8600 | return -E1000_ERR_CONFIG; |
8601 | ||
009bc06e AD |
8602 | return 0; |
8603 | } | |
8604 | ||
c8f44aff | 8605 | static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) |
9d5c8243 AK |
8606 | { |
8607 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8608 | struct e1000_hw *hw = &adapter->hw; | |
8609 | u32 ctrl, rctl; | |
f646968f | 8610 | bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); |
9d5c8243 | 8611 | |
5faf030c | 8612 | if (enable) { |
9d5c8243 AK |
8613 | /* enable VLAN tag insert/strip */ |
8614 | ctrl = rd32(E1000_CTRL); | |
8615 | ctrl |= E1000_CTRL_VME; | |
8616 | wr32(E1000_CTRL, ctrl); | |
8617 | ||
51466239 | 8618 | /* Disable CFI check */ |
9d5c8243 | 8619 | rctl = rd32(E1000_RCTL); |
9d5c8243 AK |
8620 | rctl &= ~E1000_RCTL_CFIEN; |
8621 | wr32(E1000_RCTL, rctl); | |
9d5c8243 AK |
8622 | } else { |
8623 | /* disable VLAN tag insert/strip */ | |
8624 | ctrl = rd32(E1000_CTRL); | |
8625 | ctrl &= ~E1000_CTRL_VME; | |
8626 | wr32(E1000_CTRL, ctrl); | |
9d5c8243 AK |
8627 | } |
8628 | ||
030f9f52 | 8629 | igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); |
9d5c8243 AK |
8630 | } |
8631 | ||
80d5c368 PM |
8632 | static int igb_vlan_rx_add_vid(struct net_device *netdev, |
8633 | __be16 proto, u16 vid) | |
9d5c8243 AK |
8634 | { |
8635 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8636 | struct e1000_hw *hw = &adapter->hw; | |
4ae196df | 8637 | int pf_id = adapter->vfs_allocated_count; |
9d5c8243 | 8638 | |
51466239 | 8639 | /* add the filter since PF can receive vlans w/o entry in vlvf */ |
16903caa AD |
8640 | if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) |
8641 | igb_vfta_set(hw, vid, pf_id, true, !!vid); | |
b2cb09b1 JP |
8642 | |
8643 | set_bit(vid, adapter->active_vlans); | |
8e586137 JP |
8644 | |
8645 | return 0; | |
9d5c8243 AK |
8646 | } |
8647 | ||
80d5c368 PM |
8648 | static int igb_vlan_rx_kill_vid(struct net_device *netdev, |
8649 | __be16 proto, u16 vid) | |
9d5c8243 AK |
8650 | { |
8651 | struct igb_adapter *adapter = netdev_priv(netdev); | |
4ae196df | 8652 | int pf_id = adapter->vfs_allocated_count; |
8b77c6b2 | 8653 | struct e1000_hw *hw = &adapter->hw; |
9d5c8243 | 8654 | |
8b77c6b2 | 8655 | /* remove VID from filter table */ |
16903caa AD |
8656 | if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) |
8657 | igb_vfta_set(hw, vid, pf_id, false, true); | |
b2cb09b1 JP |
8658 | |
8659 | clear_bit(vid, adapter->active_vlans); | |
8e586137 JP |
8660 | |
8661 | return 0; | |
9d5c8243 AK |
8662 | } |
8663 | ||
8664 | static void igb_restore_vlan(struct igb_adapter *adapter) | |
8665 | { | |
5982a556 | 8666 | u16 vid = 1; |
9d5c8243 | 8667 | |
5faf030c | 8668 | igb_vlan_mode(adapter->netdev, adapter->netdev->features); |
5982a556 | 8669 | igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); |
5faf030c | 8670 | |
5982a556 | 8671 | for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) |
80d5c368 | 8672 | igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); |
9d5c8243 AK |
8673 | } |
8674 | ||
14ad2513 | 8675 | int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) |
9d5c8243 | 8676 | { |
090b1795 | 8677 | struct pci_dev *pdev = adapter->pdev; |
9d5c8243 AK |
8678 | struct e1000_mac_info *mac = &adapter->hw.mac; |
8679 | ||
8680 | mac->autoneg = 0; | |
8681 | ||
14ad2513 | 8682 | /* Make sure dplx is at most 1 bit and lsb of speed is not set |
b980ac18 JK |
8683 | * for the switch() below to work |
8684 | */ | |
14ad2513 DD |
8685 | if ((spd & 1) || (dplx & ~1)) |
8686 | goto err_inval; | |
8687 | ||
f502ef7d AA |
8688 | /* Fiber NIC's only allow 1000 gbps Full duplex |
8689 | * and 100Mbps Full duplex for 100baseFx sfp | |
8690 | */ | |
8691 | if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { | |
8692 | switch (spd + dplx) { | |
8693 | case SPEED_10 + DUPLEX_HALF: | |
8694 | case SPEED_10 + DUPLEX_FULL: | |
8695 | case SPEED_100 + DUPLEX_HALF: | |
8696 | goto err_inval; | |
8697 | default: | |
8698 | break; | |
8699 | } | |
8700 | } | |
cd2638a8 | 8701 | |
14ad2513 | 8702 | switch (spd + dplx) { |
9d5c8243 AK |
8703 | case SPEED_10 + DUPLEX_HALF: |
8704 | mac->forced_speed_duplex = ADVERTISE_10_HALF; | |
8705 | break; | |
8706 | case SPEED_10 + DUPLEX_FULL: | |
8707 | mac->forced_speed_duplex = ADVERTISE_10_FULL; | |
8708 | break; | |
8709 | case SPEED_100 + DUPLEX_HALF: | |
8710 | mac->forced_speed_duplex = ADVERTISE_100_HALF; | |
8711 | break; | |
8712 | case SPEED_100 + DUPLEX_FULL: | |
8713 | mac->forced_speed_duplex = ADVERTISE_100_FULL; | |
8714 | break; | |
8715 | case SPEED_1000 + DUPLEX_FULL: | |
8716 | mac->autoneg = 1; | |
8717 | adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; | |
8718 | break; | |
8719 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | |
8720 | default: | |
14ad2513 | 8721 | goto err_inval; |
9d5c8243 | 8722 | } |
8376dad0 JB |
8723 | |
8724 | /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ | |
8725 | adapter->hw.phy.mdix = AUTO_ALL_MODES; | |
8726 | ||
9d5c8243 | 8727 | return 0; |
14ad2513 DD |
8728 | |
8729 | err_inval: | |
8730 | dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); | |
8731 | return -EINVAL; | |
9d5c8243 AK |
8732 | } |
8733 | ||
749ab2cd YZ |
8734 | static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, |
8735 | bool runtime) | |
9d5c8243 AK |
8736 | { |
8737 | struct net_device *netdev = pci_get_drvdata(pdev); | |
8738 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8739 | struct e1000_hw *hw = &adapter->hw; | |
2d064c06 | 8740 | u32 ctrl, rctl, status; |
749ab2cd | 8741 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; |
dabb8338 | 8742 | bool wake; |
9d5c8243 | 8743 | |
9474933c | 8744 | rtnl_lock(); |
9d5c8243 AK |
8745 | netif_device_detach(netdev); |
8746 | ||
a88f10ec | 8747 | if (netif_running(netdev)) |
749ab2cd | 8748 | __igb_close(netdev, true); |
a88f10ec | 8749 | |
8646f7b4 JK |
8750 | igb_ptp_suspend(adapter); |
8751 | ||
047e0030 | 8752 | igb_clear_interrupt_scheme(adapter); |
9474933c | 8753 | rtnl_unlock(); |
9d5c8243 | 8754 | |
9d5c8243 AK |
8755 | status = rd32(E1000_STATUS); |
8756 | if (status & E1000_STATUS_LU) | |
8757 | wufc &= ~E1000_WUFC_LNKC; | |
8758 | ||
8759 | if (wufc) { | |
8760 | igb_setup_rctl(adapter); | |
ff41f8dc | 8761 | igb_set_rx_mode(netdev); |
9d5c8243 AK |
8762 | |
8763 | /* turn on all-multi mode if wake on multicast is enabled */ | |
8764 | if (wufc & E1000_WUFC_MC) { | |
8765 | rctl = rd32(E1000_RCTL); | |
8766 | rctl |= E1000_RCTL_MPE; | |
8767 | wr32(E1000_RCTL, rctl); | |
8768 | } | |
8769 | ||
8770 | ctrl = rd32(E1000_CTRL); | |
9d5c8243 AK |
8771 | ctrl |= E1000_CTRL_ADVD3WUC; |
8772 | wr32(E1000_CTRL, ctrl); | |
8773 | ||
9d5c8243 | 8774 | /* Allow time for pending master requests to run */ |
330a6d6a | 8775 | igb_disable_pcie_master(hw); |
9d5c8243 AK |
8776 | |
8777 | wr32(E1000_WUC, E1000_WUC_PME_EN); | |
8778 | wr32(E1000_WUFC, wufc); | |
9d5c8243 AK |
8779 | } else { |
8780 | wr32(E1000_WUC, 0); | |
8781 | wr32(E1000_WUFC, 0); | |
9d5c8243 AK |
8782 | } |
8783 | ||
dabb8338 AS |
8784 | wake = wufc || adapter->en_mng_pt; |
8785 | if (!wake) | |
88a268c1 NN |
8786 | igb_power_down_link(adapter); |
8787 | else | |
8788 | igb_power_up_link(adapter); | |
9d5c8243 | 8789 | |
dabb8338 AS |
8790 | if (enable_wake) |
8791 | *enable_wake = wake; | |
8792 | ||
9d5c8243 | 8793 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
b980ac18 JK |
8794 | * would have already happened in close and is redundant. |
8795 | */ | |
9d5c8243 AK |
8796 | igb_release_hw_control(adapter); |
8797 | ||
8798 | pci_disable_device(pdev); | |
8799 | ||
9d5c8243 AK |
8800 | return 0; |
8801 | } | |
8802 | ||
b90fa876 KTC |
8803 | static void igb_deliver_wake_packet(struct net_device *netdev) |
8804 | { | |
8805 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8806 | struct e1000_hw *hw = &adapter->hw; | |
8807 | struct sk_buff *skb; | |
8808 | u32 wupl; | |
8809 | ||
8810 | wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; | |
8811 | ||
8812 | /* WUPM stores only the first 128 bytes of the wake packet. | |
8813 | * Read the packet only if we have the whole thing. | |
8814 | */ | |
8815 | if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) | |
8816 | return; | |
8817 | ||
8818 | skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); | |
8819 | if (!skb) | |
8820 | return; | |
8821 | ||
8822 | skb_put(skb, wupl); | |
8823 | ||
8824 | /* Ensure reads are 32-bit aligned */ | |
8825 | wupl = roundup(wupl, 4); | |
8826 | ||
8827 | memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); | |
8828 | ||
8829 | skb->protocol = eth_type_trans(skb, netdev); | |
8830 | netif_rx(skb); | |
8831 | } | |
8832 | ||
000ba1f2 | 8833 | static int __maybe_unused igb_suspend(struct device *dev) |
3fe7c4c9 | 8834 | { |
dabb8338 | 8835 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
3fe7c4c9 RW |
8836 | } |
8837 | ||
000ba1f2 | 8838 | static int __maybe_unused igb_resume(struct device *dev) |
9d5c8243 | 8839 | { |
749ab2cd | 8840 | struct pci_dev *pdev = to_pci_dev(dev); |
9d5c8243 AK |
8841 | struct net_device *netdev = pci_get_drvdata(pdev); |
8842 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8843 | struct e1000_hw *hw = &adapter->hw; | |
b90fa876 | 8844 | u32 err, val; |
9d5c8243 AK |
8845 | |
8846 | pci_set_power_state(pdev, PCI_D0); | |
8847 | pci_restore_state(pdev); | |
b94f2d77 | 8848 | pci_save_state(pdev); |
42bfd33a | 8849 | |
17a402a0 CW |
8850 | if (!pci_device_is_present(pdev)) |
8851 | return -ENODEV; | |
aed5dec3 | 8852 | err = pci_enable_device_mem(pdev); |
9d5c8243 AK |
8853 | if (err) { |
8854 | dev_err(&pdev->dev, | |
8855 | "igb: Cannot enable PCI device from suspend\n"); | |
8856 | return err; | |
8857 | } | |
8858 | pci_set_master(pdev); | |
8859 | ||
8860 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
8861 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
8862 | ||
53c7d064 | 8863 | if (igb_init_interrupt_scheme(adapter, true)) { |
a88f10ec AD |
8864 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
8865 | return -ENOMEM; | |
9d5c8243 AK |
8866 | } |
8867 | ||
9d5c8243 | 8868 | igb_reset(adapter); |
a8564f03 AD |
8869 | |
8870 | /* let the f/w know that the h/w is now under the control of the | |
b980ac18 JK |
8871 | * driver. |
8872 | */ | |
a8564f03 AD |
8873 | igb_get_hw_control(adapter); |
8874 | ||
b90fa876 KTC |
8875 | val = rd32(E1000_WUS); |
8876 | if (val & WAKE_PKT_WUS) | |
8877 | igb_deliver_wake_packet(netdev); | |
8878 | ||
9d5c8243 AK |
8879 | wr32(E1000_WUS, ~0); |
8880 | ||
9474933c TF |
8881 | rtnl_lock(); |
8882 | if (!err && netif_running(netdev)) | |
749ab2cd | 8883 | err = __igb_open(netdev, true); |
9d5c8243 | 8884 | |
9474933c TF |
8885 | if (!err) |
8886 | netif_device_attach(netdev); | |
8887 | rtnl_unlock(); | |
8888 | ||
8889 | return err; | |
749ab2cd YZ |
8890 | } |
8891 | ||
000ba1f2 | 8892 | static int __maybe_unused igb_runtime_idle(struct device *dev) |
749ab2cd | 8893 | { |
5daab287 | 8894 | struct net_device *netdev = dev_get_drvdata(dev); |
749ab2cd YZ |
8895 | struct igb_adapter *adapter = netdev_priv(netdev); |
8896 | ||
8897 | if (!igb_has_link(adapter)) | |
8898 | pm_schedule_suspend(dev, MSEC_PER_SEC * 5); | |
8899 | ||
8900 | return -EBUSY; | |
8901 | } | |
8902 | ||
000ba1f2 | 8903 | static int __maybe_unused igb_runtime_suspend(struct device *dev) |
749ab2cd | 8904 | { |
dabb8338 | 8905 | return __igb_shutdown(to_pci_dev(dev), NULL, 1); |
9d5c8243 | 8906 | } |
749ab2cd | 8907 | |
000ba1f2 | 8908 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
749ab2cd YZ |
8909 | { |
8910 | return igb_resume(dev); | |
8911 | } | |
9d5c8243 AK |
8912 | |
8913 | static void igb_shutdown(struct pci_dev *pdev) | |
8914 | { | |
3fe7c4c9 RW |
8915 | bool wake; |
8916 | ||
749ab2cd | 8917 | __igb_shutdown(pdev, &wake, 0); |
3fe7c4c9 RW |
8918 | |
8919 | if (system_state == SYSTEM_POWER_OFF) { | |
8920 | pci_wake_from_d3(pdev, wake); | |
8921 | pci_set_power_state(pdev, PCI_D3hot); | |
8922 | } | |
9d5c8243 AK |
8923 | } |
8924 | ||
fa44f2f1 GR |
8925 | #ifdef CONFIG_PCI_IOV |
8926 | static int igb_sriov_reinit(struct pci_dev *dev) | |
8927 | { | |
8928 | struct net_device *netdev = pci_get_drvdata(dev); | |
8929 | struct igb_adapter *adapter = netdev_priv(netdev); | |
8930 | struct pci_dev *pdev = adapter->pdev; | |
8931 | ||
8932 | rtnl_lock(); | |
8933 | ||
8934 | if (netif_running(netdev)) | |
8935 | igb_close(netdev); | |
76252723 SA |
8936 | else |
8937 | igb_reset(adapter); | |
fa44f2f1 GR |
8938 | |
8939 | igb_clear_interrupt_scheme(adapter); | |
8940 | ||
8941 | igb_init_queue_configuration(adapter); | |
8942 | ||
8943 | if (igb_init_interrupt_scheme(adapter, true)) { | |
f468adc9 | 8944 | rtnl_unlock(); |
fa44f2f1 GR |
8945 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
8946 | return -ENOMEM; | |
8947 | } | |
8948 | ||
8949 | if (netif_running(netdev)) | |
8950 | igb_open(netdev); | |
8951 | ||
8952 | rtnl_unlock(); | |
8953 | ||
8954 | return 0; | |
8955 | } | |
8956 | ||
8957 | static int igb_pci_disable_sriov(struct pci_dev *dev) | |
8958 | { | |
8959 | int err = igb_disable_sriov(dev); | |
8960 | ||
8961 | if (!err) | |
8962 | err = igb_sriov_reinit(dev); | |
8963 | ||
8964 | return err; | |
8965 | } | |
8966 | ||
8967 | static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) | |
8968 | { | |
8969 | int err = igb_enable_sriov(dev, num_vfs); | |
8970 | ||
8971 | if (err) | |
8972 | goto out; | |
8973 | ||
8974 | err = igb_sriov_reinit(dev); | |
8975 | if (!err) | |
8976 | return num_vfs; | |
8977 | ||
8978 | out: | |
8979 | return err; | |
8980 | } | |
8981 | ||
8982 | #endif | |
8983 | static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) | |
8984 | { | |
8985 | #ifdef CONFIG_PCI_IOV | |
8986 | if (num_vfs == 0) | |
8987 | return igb_pci_disable_sriov(dev); | |
8988 | else | |
8989 | return igb_pci_enable_sriov(dev, num_vfs); | |
8990 | #endif | |
8991 | return 0; | |
8992 | } | |
8993 | ||
9d5c8243 | 8994 | /** |
b980ac18 JK |
8995 | * igb_io_error_detected - called when PCI error is detected |
8996 | * @pdev: Pointer to PCI device | |
8997 | * @state: The current pci connection state | |
9d5c8243 | 8998 | * |
b980ac18 JK |
8999 | * This function is called after a PCI bus error affecting |
9000 | * this device has been detected. | |
9001 | **/ | |
9d5c8243 AK |
9002 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, |
9003 | pci_channel_state_t state) | |
9004 | { | |
9005 | struct net_device *netdev = pci_get_drvdata(pdev); | |
9006 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9007 | ||
9008 | netif_device_detach(netdev); | |
9009 | ||
59ed6eec AD |
9010 | if (state == pci_channel_io_perm_failure) |
9011 | return PCI_ERS_RESULT_DISCONNECT; | |
9012 | ||
9d5c8243 AK |
9013 | if (netif_running(netdev)) |
9014 | igb_down(adapter); | |
9015 | pci_disable_device(pdev); | |
9016 | ||
9017 | /* Request a slot slot reset. */ | |
9018 | return PCI_ERS_RESULT_NEED_RESET; | |
9019 | } | |
9020 | ||
9021 | /** | |
b980ac18 JK |
9022 | * igb_io_slot_reset - called after the pci bus has been reset. |
9023 | * @pdev: Pointer to PCI device | |
9d5c8243 | 9024 | * |
b980ac18 JK |
9025 | * Restart the card from scratch, as if from a cold-boot. Implementation |
9026 | * resembles the first-half of the igb_resume routine. | |
9027 | **/ | |
9d5c8243 AK |
9028 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) |
9029 | { | |
9030 | struct net_device *netdev = pci_get_drvdata(pdev); | |
9031 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9032 | struct e1000_hw *hw = &adapter->hw; | |
40a914fa | 9033 | pci_ers_result_t result; |
9d5c8243 | 9034 | |
aed5dec3 | 9035 | if (pci_enable_device_mem(pdev)) { |
9d5c8243 AK |
9036 | dev_err(&pdev->dev, |
9037 | "Cannot re-enable PCI device after reset.\n"); | |
40a914fa AD |
9038 | result = PCI_ERS_RESULT_DISCONNECT; |
9039 | } else { | |
9040 | pci_set_master(pdev); | |
9041 | pci_restore_state(pdev); | |
b94f2d77 | 9042 | pci_save_state(pdev); |
9d5c8243 | 9043 | |
40a914fa AD |
9044 | pci_enable_wake(pdev, PCI_D3hot, 0); |
9045 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
9d5c8243 | 9046 | |
69b97cf6 GP |
9047 | /* In case of PCI error, adapter lose its HW address |
9048 | * so we should re-assign it here. | |
9049 | */ | |
9050 | hw->hw_addr = adapter->io_addr; | |
9051 | ||
40a914fa AD |
9052 | igb_reset(adapter); |
9053 | wr32(E1000_WUS, ~0); | |
9054 | result = PCI_ERS_RESULT_RECOVERED; | |
9055 | } | |
9d5c8243 | 9056 | |
40a914fa | 9057 | return result; |
9d5c8243 AK |
9058 | } |
9059 | ||
9060 | /** | |
b980ac18 JK |
9061 | * igb_io_resume - called when traffic can start flowing again. |
9062 | * @pdev: Pointer to PCI device | |
9d5c8243 | 9063 | * |
b980ac18 JK |
9064 | * This callback is called when the error recovery driver tells us that |
9065 | * its OK to resume normal operation. Implementation resembles the | |
9066 | * second-half of the igb_resume routine. | |
9d5c8243 AK |
9067 | */ |
9068 | static void igb_io_resume(struct pci_dev *pdev) | |
9069 | { | |
9070 | struct net_device *netdev = pci_get_drvdata(pdev); | |
9071 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9072 | ||
9d5c8243 AK |
9073 | if (netif_running(netdev)) { |
9074 | if (igb_up(adapter)) { | |
9075 | dev_err(&pdev->dev, "igb_up failed after reset\n"); | |
9076 | return; | |
9077 | } | |
9078 | } | |
9079 | ||
9080 | netif_device_attach(netdev); | |
9081 | ||
9082 | /* let the f/w know that the h/w is now under the control of the | |
b980ac18 JK |
9083 | * driver. |
9084 | */ | |
9d5c8243 | 9085 | igb_get_hw_control(adapter); |
9d5c8243 AK |
9086 | } |
9087 | ||
83c21335 YK |
9088 | /** |
9089 | * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table | |
9090 | * @adapter: Pointer to adapter structure | |
9091 | * @index: Index of the RAR entry which need to be synced with MAC table | |
9092 | **/ | |
9093 | static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) | |
26ad9178 | 9094 | { |
26ad9178 | 9095 | struct e1000_hw *hw = &adapter->hw; |
c3278587 | 9096 | u32 rar_low, rar_high; |
83c21335 | 9097 | u8 *addr = adapter->mac_table[index].addr; |
26ad9178 | 9098 | |
415cd2a6 AD |
9099 | /* HW expects these to be in network order when they are plugged |
9100 | * into the registers which are little endian. In order to guarantee | |
9101 | * that ordering we need to do an leXX_to_cpup here in order to be | |
9102 | * ready for the byteswap that occurs with writel | |
26ad9178 | 9103 | */ |
415cd2a6 AD |
9104 | rar_low = le32_to_cpup((__le32 *)(addr)); |
9105 | rar_high = le16_to_cpup((__le16 *)(addr + 4)); | |
26ad9178 AD |
9106 | |
9107 | /* Indicate to hardware the Address is Valid. */ | |
83c21335 | 9108 | if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { |
177132df CV |
9109 | if (is_valid_ether_addr(addr)) |
9110 | rar_high |= E1000_RAH_AV; | |
26ad9178 | 9111 | |
1d717cf4 VCG |
9112 | if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) |
9113 | rar_high |= E1000_RAH_ASEL_SRC_ADDR; | |
9114 | ||
4dc93fcf VCG |
9115 | switch (hw->mac.type) { |
9116 | case e1000_82575: | |
9117 | case e1000_i210: | |
0a823899 VCG |
9118 | if (adapter->mac_table[index].state & |
9119 | IGB_MAC_STATE_QUEUE_STEERING) | |
9120 | rar_high |= E1000_RAH_QSEL_ENABLE; | |
9121 | ||
83c21335 YK |
9122 | rar_high |= E1000_RAH_POOL_1 * |
9123 | adapter->mac_table[index].queue; | |
4dc93fcf VCG |
9124 | break; |
9125 | default: | |
83c21335 YK |
9126 | rar_high |= E1000_RAH_POOL_1 << |
9127 | adapter->mac_table[index].queue; | |
4dc93fcf VCG |
9128 | break; |
9129 | } | |
83c21335 | 9130 | } |
26ad9178 AD |
9131 | |
9132 | wr32(E1000_RAL(index), rar_low); | |
9133 | wrfl(); | |
9134 | wr32(E1000_RAH(index), rar_high); | |
9135 | wrfl(); | |
9136 | } | |
9137 | ||
4ae196df | 9138 | static int igb_set_vf_mac(struct igb_adapter *adapter, |
b980ac18 | 9139 | int vf, unsigned char *mac_addr) |
4ae196df AD |
9140 | { |
9141 | struct e1000_hw *hw = &adapter->hw; | |
ff41f8dc | 9142 | /* VF MAC addresses start at end of receive addresses and moves |
b980ac18 JK |
9143 | * towards the first, as a result a collision should not be possible |
9144 | */ | |
ff41f8dc | 9145 | int rar_entry = hw->mac.rar_entry_count - (vf + 1); |
83c21335 | 9146 | unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; |
4ae196df | 9147 | |
83c21335 YK |
9148 | ether_addr_copy(vf_mac_addr, mac_addr); |
9149 | ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); | |
9150 | adapter->mac_table[rar_entry].queue = vf; | |
9151 | adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; | |
9152 | igb_rar_set_index(adapter, rar_entry); | |
4ae196df AD |
9153 | |
9154 | return 0; | |
9155 | } | |
9156 | ||
8151d294 WM |
9157 | static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) |
9158 | { | |
9159 | struct igb_adapter *adapter = netdev_priv(netdev); | |
177132df CV |
9160 | |
9161 | if (vf >= adapter->vfs_allocated_count) | |
9162 | return -EINVAL; | |
9163 | ||
9164 | /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC | |
9165 | * flag and allows to overwrite the MAC via VF netdev. This | |
9166 | * is necessary to allow libvirt a way to restore the original | |
9167 | * MAC after unbinding vfio-pci and reloading igbvf after shutting | |
9168 | * down a VM. | |
9169 | */ | |
9170 | if (is_zero_ether_addr(mac)) { | |
9171 | adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; | |
9172 | dev_info(&adapter->pdev->dev, | |
9173 | "remove administratively set MAC on VF %d\n", | |
9174 | vf); | |
9175 | } else if (is_valid_ether_addr(mac)) { | |
9176 | adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; | |
9177 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", | |
9178 | mac, vf); | |
9179 | dev_info(&adapter->pdev->dev, | |
9180 | "Reload the VF driver to make this change effective."); | |
9181 | /* Generate additional warning if PF is down */ | |
9182 | if (test_bit(__IGB_DOWN, &adapter->state)) { | |
9183 | dev_warn(&adapter->pdev->dev, | |
9184 | "The VF MAC address has been set, but the PF device is not up.\n"); | |
9185 | dev_warn(&adapter->pdev->dev, | |
9186 | "Bring the PF device up before attempting to use the VF device.\n"); | |
9187 | } | |
9188 | } else { | |
8151d294 | 9189 | return -EINVAL; |
8151d294 WM |
9190 | } |
9191 | return igb_set_vf_mac(adapter, vf, mac); | |
9192 | } | |
9193 | ||
17dc566c LL |
9194 | static int igb_link_mbps(int internal_link_speed) |
9195 | { | |
9196 | switch (internal_link_speed) { | |
9197 | case SPEED_100: | |
9198 | return 100; | |
9199 | case SPEED_1000: | |
9200 | return 1000; | |
9201 | default: | |
9202 | return 0; | |
9203 | } | |
9204 | } | |
9205 | ||
9206 | static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, | |
9207 | int link_speed) | |
9208 | { | |
9209 | int rf_dec, rf_int; | |
9210 | u32 bcnrc_val; | |
9211 | ||
9212 | if (tx_rate != 0) { | |
9213 | /* Calculate the rate factor values to set */ | |
9214 | rf_int = link_speed / tx_rate; | |
9215 | rf_dec = (link_speed - (rf_int * tx_rate)); | |
a51d8c21 | 9216 | rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / |
b980ac18 | 9217 | tx_rate; |
17dc566c LL |
9218 | |
9219 | bcnrc_val = E1000_RTTBCNRC_RS_ENA; | |
b980ac18 JK |
9220 | bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & |
9221 | E1000_RTTBCNRC_RF_INT_MASK); | |
17dc566c LL |
9222 | bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); |
9223 | } else { | |
9224 | bcnrc_val = 0; | |
9225 | } | |
9226 | ||
9227 | wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ | |
b980ac18 | 9228 | /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM |
f00b0da7 LL |
9229 | * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. |
9230 | */ | |
9231 | wr32(E1000_RTTBCNRM, 0x14); | |
17dc566c LL |
9232 | wr32(E1000_RTTBCNRC, bcnrc_val); |
9233 | } | |
9234 | ||
9235 | static void igb_check_vf_rate_limit(struct igb_adapter *adapter) | |
9236 | { | |
9237 | int actual_link_speed, i; | |
9238 | bool reset_rate = false; | |
9239 | ||
9240 | /* VF TX rate limit was not set or not supported */ | |
9241 | if ((adapter->vf_rate_link_speed == 0) || | |
9242 | (adapter->hw.mac.type != e1000_82576)) | |
9243 | return; | |
9244 | ||
9245 | actual_link_speed = igb_link_mbps(adapter->link_speed); | |
9246 | if (actual_link_speed != adapter->vf_rate_link_speed) { | |
9247 | reset_rate = true; | |
9248 | adapter->vf_rate_link_speed = 0; | |
9249 | dev_info(&adapter->pdev->dev, | |
b980ac18 | 9250 | "Link speed has been changed. VF Transmit rate is disabled\n"); |
17dc566c LL |
9251 | } |
9252 | ||
9253 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | |
9254 | if (reset_rate) | |
9255 | adapter->vf_data[i].tx_rate = 0; | |
9256 | ||
9257 | igb_set_vf_rate_limit(&adapter->hw, i, | |
b980ac18 JK |
9258 | adapter->vf_data[i].tx_rate, |
9259 | actual_link_speed); | |
17dc566c LL |
9260 | } |
9261 | } | |
9262 | ||
ed616689 SC |
9263 | static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, |
9264 | int min_tx_rate, int max_tx_rate) | |
8151d294 | 9265 | { |
17dc566c LL |
9266 | struct igb_adapter *adapter = netdev_priv(netdev); |
9267 | struct e1000_hw *hw = &adapter->hw; | |
9268 | int actual_link_speed; | |
9269 | ||
9270 | if (hw->mac.type != e1000_82576) | |
9271 | return -EOPNOTSUPP; | |
9272 | ||
ed616689 SC |
9273 | if (min_tx_rate) |
9274 | return -EINVAL; | |
9275 | ||
17dc566c LL |
9276 | actual_link_speed = igb_link_mbps(adapter->link_speed); |
9277 | if ((vf >= adapter->vfs_allocated_count) || | |
9278 | (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || | |
ed616689 SC |
9279 | (max_tx_rate < 0) || |
9280 | (max_tx_rate > actual_link_speed)) | |
17dc566c LL |
9281 | return -EINVAL; |
9282 | ||
9283 | adapter->vf_rate_link_speed = actual_link_speed; | |
ed616689 SC |
9284 | adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; |
9285 | igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); | |
17dc566c LL |
9286 | |
9287 | return 0; | |
8151d294 WM |
9288 | } |
9289 | ||
70ea4783 LL |
9290 | static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, |
9291 | bool setting) | |
9292 | { | |
9293 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9294 | struct e1000_hw *hw = &adapter->hw; | |
9295 | u32 reg_val, reg_offset; | |
9296 | ||
9297 | if (!adapter->vfs_allocated_count) | |
9298 | return -EOPNOTSUPP; | |
9299 | ||
9300 | if (vf >= adapter->vfs_allocated_count) | |
9301 | return -EINVAL; | |
9302 | ||
9303 | reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; | |
9304 | reg_val = rd32(reg_offset); | |
9305 | if (setting) | |
a51d8c21 JK |
9306 | reg_val |= (BIT(vf) | |
9307 | BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); | |
70ea4783 | 9308 | else |
a51d8c21 JK |
9309 | reg_val &= ~(BIT(vf) | |
9310 | BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); | |
70ea4783 LL |
9311 | wr32(reg_offset, reg_val); |
9312 | ||
9313 | adapter->vf_data[vf].spoofchk_enabled = setting; | |
23d87824 | 9314 | return 0; |
70ea4783 LL |
9315 | } |
9316 | ||
1b8b062a CV |
9317 | static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) |
9318 | { | |
9319 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9320 | ||
9321 | if (vf >= adapter->vfs_allocated_count) | |
9322 | return -EINVAL; | |
9323 | if (adapter->vf_data[vf].trusted == setting) | |
9324 | return 0; | |
9325 | ||
9326 | adapter->vf_data[vf].trusted = setting; | |
9327 | ||
9328 | dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", | |
9329 | vf, setting ? "" : "not "); | |
9330 | return 0; | |
9331 | } | |
9332 | ||
8151d294 WM |
9333 | static int igb_ndo_get_vf_config(struct net_device *netdev, |
9334 | int vf, struct ifla_vf_info *ivi) | |
9335 | { | |
9336 | struct igb_adapter *adapter = netdev_priv(netdev); | |
9337 | if (vf >= adapter->vfs_allocated_count) | |
9338 | return -EINVAL; | |
9339 | ivi->vf = vf; | |
9340 | memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); | |
ed616689 SC |
9341 | ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; |
9342 | ivi->min_tx_rate = 0; | |
8151d294 WM |
9343 | ivi->vlan = adapter->vf_data[vf].pf_vlan; |
9344 | ivi->qos = adapter->vf_data[vf].pf_qos; | |
70ea4783 | 9345 | ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; |
1b8b062a | 9346 | ivi->trusted = adapter->vf_data[vf].trusted; |
8151d294 WM |
9347 | return 0; |
9348 | } | |
9349 | ||
4ae196df AD |
9350 | static void igb_vmm_control(struct igb_adapter *adapter) |
9351 | { | |
9352 | struct e1000_hw *hw = &adapter->hw; | |
10d8e907 | 9353 | u32 reg; |
4ae196df | 9354 | |
52a1dd4d AD |
9355 | switch (hw->mac.type) { |
9356 | case e1000_82575: | |
f96a8a0b CW |
9357 | case e1000_i210: |
9358 | case e1000_i211: | |
ceb5f13b | 9359 | case e1000_i354: |
52a1dd4d AD |
9360 | default: |
9361 | /* replication is not supported for 82575 */ | |
4ae196df | 9362 | return; |
52a1dd4d AD |
9363 | case e1000_82576: |
9364 | /* notify HW that the MAC is adding vlan tags */ | |
9365 | reg = rd32(E1000_DTXCTL); | |
9366 | reg |= E1000_DTXCTL_VLAN_ADDED; | |
9367 | wr32(E1000_DTXCTL, reg); | |
b26141d4 | 9368 | /* Fall through */ |
52a1dd4d AD |
9369 | case e1000_82580: |
9370 | /* enable replication vlan tag stripping */ | |
9371 | reg = rd32(E1000_RPLOLR); | |
9372 | reg |= E1000_RPLOLR_STRVLAN; | |
9373 | wr32(E1000_RPLOLR, reg); | |
b26141d4 | 9374 | /* Fall through */ |
d2ba2ed8 AD |
9375 | case e1000_i350: |
9376 | /* none of the above registers are supported by i350 */ | |
52a1dd4d AD |
9377 | break; |
9378 | } | |
10d8e907 | 9379 | |
d4960307 AD |
9380 | if (adapter->vfs_allocated_count) { |
9381 | igb_vmdq_set_loopback_pf(hw, true); | |
9382 | igb_vmdq_set_replication_pf(hw, true); | |
13800469 | 9383 | igb_vmdq_set_anti_spoofing_pf(hw, true, |
b980ac18 | 9384 | adapter->vfs_allocated_count); |
d4960307 AD |
9385 | } else { |
9386 | igb_vmdq_set_loopback_pf(hw, false); | |
9387 | igb_vmdq_set_replication_pf(hw, false); | |
9388 | } | |
4ae196df AD |
9389 | } |
9390 | ||
b6e0c419 CW |
9391 | static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) |
9392 | { | |
9393 | struct e1000_hw *hw = &adapter->hw; | |
9394 | u32 dmac_thr; | |
9395 | u16 hwm; | |
9396 | ||
9397 | if (hw->mac.type > e1000_82580) { | |
9398 | if (adapter->flags & IGB_FLAG_DMAC) { | |
9399 | u32 reg; | |
9400 | ||
9401 | /* force threshold to 0. */ | |
9402 | wr32(E1000_DMCTXTH, 0); | |
9403 | ||
b980ac18 | 9404 | /* DMA Coalescing high water mark needs to be greater |
e8c626e9 MV |
9405 | * than the Rx threshold. Set hwm to PBA - max frame |
9406 | * size in 16B units, capping it at PBA - 6KB. | |
b6e0c419 | 9407 | */ |
45693bcb | 9408 | hwm = 64 * (pba - 6); |
e8c626e9 MV |
9409 | reg = rd32(E1000_FCRTC); |
9410 | reg &= ~E1000_FCRTC_RTH_COAL_MASK; | |
9411 | reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) | |
9412 | & E1000_FCRTC_RTH_COAL_MASK); | |
9413 | wr32(E1000_FCRTC, reg); | |
9414 | ||
b980ac18 | 9415 | /* Set the DMA Coalescing Rx threshold to PBA - 2 * max |
e8c626e9 MV |
9416 | * frame size, capping it at PBA - 10KB. |
9417 | */ | |
45693bcb | 9418 | dmac_thr = pba - 10; |
b6e0c419 CW |
9419 | reg = rd32(E1000_DMACR); |
9420 | reg &= ~E1000_DMACR_DMACTHR_MASK; | |
b6e0c419 CW |
9421 | reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) |
9422 | & E1000_DMACR_DMACTHR_MASK); | |
9423 | ||
9424 | /* transition to L0x or L1 if available..*/ | |
9425 | reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); | |
9426 | ||
9427 | /* watchdog timer= +-1000 usec in 32usec intervals */ | |
9428 | reg |= (1000 >> 5); | |
0c02dd98 MV |
9429 | |
9430 | /* Disable BMC-to-OS Watchdog Enable */ | |
ceb5f13b CW |
9431 | if (hw->mac.type != e1000_i354) |
9432 | reg &= ~E1000_DMACR_DC_BMC2OSW_EN; | |
9433 | ||
b6e0c419 CW |
9434 | wr32(E1000_DMACR, reg); |
9435 | ||
b980ac18 | 9436 | /* no lower threshold to disable |
b6e0c419 CW |
9437 | * coalescing(smart fifb)-UTRESH=0 |
9438 | */ | |
9439 | wr32(E1000_DMCRTRH, 0); | |
b6e0c419 CW |
9440 | |
9441 | reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); | |
9442 | ||
9443 | wr32(E1000_DMCTLX, reg); | |
9444 | ||
b980ac18 | 9445 | /* free space in tx packet buffer to wake from |
b6e0c419 CW |
9446 | * DMA coal |
9447 | */ | |
9448 | wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - | |
9449 | (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); | |
9450 | ||
b980ac18 | 9451 | /* make low power state decision controlled |
b6e0c419 CW |
9452 | * by DMA coal |
9453 | */ | |
9454 | reg = rd32(E1000_PCIEMISC); | |
9455 | reg &= ~E1000_PCIEMISC_LX_DECISION; | |
9456 | wr32(E1000_PCIEMISC, reg); | |
9457 | } /* endif adapter->dmac is not disabled */ | |
9458 | } else if (hw->mac.type == e1000_82580) { | |
9459 | u32 reg = rd32(E1000_PCIEMISC); | |
9005df38 | 9460 | |
b6e0c419 CW |
9461 | wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); |
9462 | wr32(E1000_DMACR, 0); | |
9463 | } | |
9464 | } | |
9465 | ||
b980ac18 JK |
9466 | /** |
9467 | * igb_read_i2c_byte - Reads 8 bit word over I2C | |
441fc6fd CW |
9468 | * @hw: pointer to hardware structure |
9469 | * @byte_offset: byte offset to read | |
9470 | * @dev_addr: device address | |
9471 | * @data: value read | |
9472 | * | |
9473 | * Performs byte read operation over I2C interface at | |
9474 | * a specified device address. | |
b980ac18 | 9475 | **/ |
441fc6fd | 9476 | s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, |
b980ac18 | 9477 | u8 dev_addr, u8 *data) |
441fc6fd CW |
9478 | { |
9479 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | |
603e86fa | 9480 | struct i2c_client *this_client = adapter->i2c_client; |
441fc6fd CW |
9481 | s32 status; |
9482 | u16 swfw_mask = 0; | |
9483 | ||
9484 | if (!this_client) | |
9485 | return E1000_ERR_I2C; | |
9486 | ||
9487 | swfw_mask = E1000_SWFW_PHY0_SM; | |
9488 | ||
23d87824 | 9489 | if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) |
441fc6fd CW |
9490 | return E1000_ERR_SWFW_SYNC; |
9491 | ||
9492 | status = i2c_smbus_read_byte_data(this_client, byte_offset); | |
9493 | hw->mac.ops.release_swfw_sync(hw, swfw_mask); | |
9494 | ||
9495 | if (status < 0) | |
9496 | return E1000_ERR_I2C; | |
9497 | else { | |
9498 | *data = status; | |
23d87824 | 9499 | return 0; |
441fc6fd CW |
9500 | } |
9501 | } | |
9502 | ||
b980ac18 JK |
9503 | /** |
9504 | * igb_write_i2c_byte - Writes 8 bit word over I2C | |
441fc6fd CW |
9505 | * @hw: pointer to hardware structure |
9506 | * @byte_offset: byte offset to write | |
9507 | * @dev_addr: device address | |
9508 | * @data: value to write | |
9509 | * | |
9510 | * Performs byte write operation over I2C interface at | |
9511 | * a specified device address. | |
b980ac18 | 9512 | **/ |
441fc6fd | 9513 | s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, |
b980ac18 | 9514 | u8 dev_addr, u8 data) |
441fc6fd CW |
9515 | { |
9516 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | |
603e86fa | 9517 | struct i2c_client *this_client = adapter->i2c_client; |
441fc6fd CW |
9518 | s32 status; |
9519 | u16 swfw_mask = E1000_SWFW_PHY0_SM; | |
9520 | ||
9521 | if (!this_client) | |
9522 | return E1000_ERR_I2C; | |
9523 | ||
23d87824 | 9524 | if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) |
441fc6fd CW |
9525 | return E1000_ERR_SWFW_SYNC; |
9526 | status = i2c_smbus_write_byte_data(this_client, byte_offset, data); | |
9527 | hw->mac.ops.release_swfw_sync(hw, swfw_mask); | |
9528 | ||
9529 | if (status) | |
9530 | return E1000_ERR_I2C; | |
9531 | else | |
23d87824 | 9532 | return 0; |
441fc6fd CW |
9533 | |
9534 | } | |
907b7835 LMV |
9535 | |
9536 | int igb_reinit_queues(struct igb_adapter *adapter) | |
9537 | { | |
9538 | struct net_device *netdev = adapter->netdev; | |
9539 | struct pci_dev *pdev = adapter->pdev; | |
9540 | int err = 0; | |
9541 | ||
9542 | if (netif_running(netdev)) | |
9543 | igb_close(netdev); | |
9544 | ||
02ef6e1d | 9545 | igb_reset_interrupt_capability(adapter); |
907b7835 LMV |
9546 | |
9547 | if (igb_init_interrupt_scheme(adapter, true)) { | |
9548 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | |
9549 | return -ENOMEM; | |
9550 | } | |
9551 | ||
9552 | if (netif_running(netdev)) | |
9553 | err = igb_open(netdev); | |
9554 | ||
9555 | return err; | |
9556 | } | |
0e71def2 GH |
9557 | |
9558 | static void igb_nfc_filter_exit(struct igb_adapter *adapter) | |
9559 | { | |
9560 | struct igb_nfc_filter *rule; | |
9561 | ||
9562 | spin_lock(&adapter->nfc_lock); | |
9563 | ||
9564 | hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) | |
9565 | igb_erase_filter(adapter, rule); | |
9566 | ||
e086be9a VCG |
9567 | hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) |
9568 | igb_erase_filter(adapter, rule); | |
9569 | ||
0e71def2 GH |
9570 | spin_unlock(&adapter->nfc_lock); |
9571 | } | |
9572 | ||
9573 | static void igb_nfc_filter_restore(struct igb_adapter *adapter) | |
9574 | { | |
9575 | struct igb_nfc_filter *rule; | |
9576 | ||
9577 | spin_lock(&adapter->nfc_lock); | |
9578 | ||
9579 | hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) | |
9580 | igb_add_filter(adapter, rule); | |
9581 | ||
9582 | spin_unlock(&adapter->nfc_lock); | |
9583 | } | |
9d5c8243 | 9584 | /* igb_main.c */ |