i40e: re-enable PTP L4 capabilities for XL710 if FW >6.0
drivers/net/ethernet/intel/i40e/i40e_main.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
        "Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
        __stringify(DRV_VERSION_MINOR) "." \
        __stringify(DRV_VERSION_BUILD) DRV_KERN
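/* Editorial note: with the values above, DRV_VERSION expands to the
 * string "2.1.14-k" -- __stringify() turns each numeric component into a
 * string literal, and the adjacent literals (including DRV_KERN) are
 * concatenated by the compiler.
 */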
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
                            u64 size, u32 alignment)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
                                      &mem->pa, GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;

        return 0;
}

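/* Editorial note: i40e_allocate_dma_mem_d() and the three *_d routines
 * that follow are the Linux OS-dependence layer for the hardware-agnostic
 * "shared code"; that code reaches them through wrapper macros defined in
 * i40e_osdep.h rather than calling the kernel DMA/slab APIs directly.
 */
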
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
        mem->va = NULL;
        mem->pa = 0;
        mem->size = 0;

        return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
                             u32 size)
{
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return -ENOMEM;

        return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);
        mem->va = NULL;
        mem->size = 0;

        return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
{
        int ret = -ENOMEM;
        int i, j;

        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
                         "param err: pile=%p needed=%d id=0x%04x\n",
                         pile, needed, id);
                return -EINVAL;
        }

        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
        while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
                        continue;
                }

                /* do we have enough in this lump? */
                for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
                        if (pile->list[i+j] & I40E_PILE_VALID_BIT)
                                break;
                }

                if (j == needed) {
                        /* there was enough, so assign it to the requestor */
                        for (j = 0; j < needed; j++)
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
                        break;
                }

                /* not enough, so skip over it and continue looking */
                i += j;
        }

        return ret;
}
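
/* Illustrative walk-through (editorial, not driver code): with a six-entry
 * pile [A, A, free, free, free, B] and search_hint = 2, a request for
 * needed = 2 scans from index 2, finds entries 2 and 3 free, stamps both
 * with id | I40E_PILE_VALID_BIT, advances search_hint to 4, and returns
 * base index 2. i40e_put_lump() below later clears such entries and
 * rewinds the hint.
 */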

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
        int valid_id = (id | I40E_PILE_VALID_BIT);
        int count = 0;
        int i;

        if (!pile || index >= pile->num_entries)
                return -EINVAL;

        for (i = index;
             i < pile->num_entries && pile->list[i] == valid_id;
             i++) {
                pile->list[i] = 0;
                count++;
        }

        if (count && index < pile->search_hint)
                pile->search_hint = index;

        return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
        int i;

        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i] && (pf->vsi[i]->id == id))
                        return pf->vsi[i];

        return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
        if (!test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_ring *tx_ring = NULL;
        unsigned int i, hung_queue = 0;
        u32 head, val;

        pf->tx_timeout_count++;

        /* find the stopped queue the same way the stack does */
        for (i = 0; i < netdev->num_tx_queues; i++) {
                struct netdev_queue *q;
                unsigned long trans_start;

                q = netdev_get_tx_queue(netdev, i);
                trans_start = q->trans_start;
                if (netif_xmit_stopped(q) &&
                    time_after(jiffies,
                               (trans_start + netdev->watchdog_timeo))) {
                        hung_queue = i;
                        break;
                }
        }

        if (i == netdev->num_tx_queues) {
                netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
        } else {
                /* now that we have an index, find the tx_ring struct */
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
                                if (hung_queue ==
                                    vsi->tx_rings[i]->queue_index) {
                                        tx_ring = vsi->tx_rings[i];
                                        break;
                                }
                        }
                }
        }

        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
                pf->tx_timeout_recovery_level = 1;  /* reset after some time */
        else if (time_before(jiffies,
                 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
                return;   /* don't do any new action before the next timeout */

        if (tx_ring) {
                head = i40e_get_head(tx_ring);
                /* Read interrupt register */
                if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        val = rd32(&pf->hw,
                                   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
                                        tx_ring->vsi->base_vector - 1));
                else
                        val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

                netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
                            vsi->seid, hung_queue, tx_ring->next_to_clean,
                            head, tx_ring->next_to_use,
                            readl(tx_ring->tail), val);
        }

        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
                    pf->tx_timeout_recovery_level, hung_queue);

        switch (pf->tx_timeout_recovery_level) {
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
                break;
        case 2:
                set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
                break;
        case 3:
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
                break;
        }

        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
}

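/* Editorial note on the escalation above: each successive timeout within
 * the watchdog window bumps tx_timeout_recovery_level, requesting a PF
 * reset first, then a core reset, then a global reset; once 20 seconds
 * pass without another hang the level drops back to 1.
 */
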
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
        return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
                                            struct rtnl_link_stats64 *stats)
{
        u64 bytes, packets;
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&ring->syncp);
                packets = ring->stats.packets;
                bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

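        /* Editorial note: the fetch_begin/fetch_retry pair is a seqcount
         * read section; if the ring's writer updates the counters while we
         * read them, the snapshot is discarded and re-read, so packets and
         * bytes always form a consistent pair even on 32-bit kernels where
         * 64-bit loads are not atomic.
         */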
        stats->tx_packets += packets;
        stats->tx_bytes += bytes;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics structure to fill in
 *
 * Populates the netdev statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                         struct rtnl_link_stats64 *stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_ring *tx_ring, *rx_ring;
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        int i;

        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return;

        if (!vsi->tx_rings)
                return;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                u64 bytes, packets;
                unsigned int start;

                tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
                if (!tx_ring)
                        continue;
                i40e_get_netdev_stats_struct_tx(tx_ring, stats);

                rx_ring = &tx_ring[1];
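                /* Editorial note: this pointer arithmetic relies on each
                 * queue pair's rings living in one contiguous allocation,
                 * laid out Tx, then Rx, then (when XDP is enabled) the XDP
                 * Tx ring reached via &rx_ring[1] below.
                 */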

                do {
                        start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
                        packets = rx_ring->stats.packets;
                        bytes = rx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

                stats->rx_packets += packets;
                stats->rx_bytes += bytes;

                if (i40e_enabled_xdp_vsi(vsi))
                        i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
        }
        rcu_read_unlock();

        /* following stats updated by i40e_watchdog_subtask() */
        stats->multicast = vsi_stats->multicast;
        stats->tx_errors = vsi_stats->tx_errors;
        stats->tx_dropped = vsi_stats->tx_dropped;
        stats->rx_errors = vsi_stats->rx_errors;
        stats->rx_dropped = vsi_stats->rx_dropped;
        stats->rx_crc_errors = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
        struct rtnl_link_stats64 *ns;
        int i;

        if (!vsi)
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        memset(ns, 0, sizeof(*ns));
        memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
        memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
                        memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
                        memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
                }
        }
        vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
        int i;

        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;

        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (pf->veb[i]) {
                        memset(&pf->veb[i]->stats, 0,
                               sizeof(pf->veb[i]->stats));
                        memset(&pf->veb[i]->stats_offsets, 0,
                               sizeof(pf->veb[i]->stats_offsets));
                        pf->veb[i]->stat_offsets_loaded = false;
                }
        }
        pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u64 new_data;

        if (hw->device_id == I40E_DEV_ID_QEMU) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
        } else {
                new_data = rd64(hw, loreg);
        }
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
                *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
}
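
/* Worked example (editorial): a 48-bit counter wraps at 2^48. If the saved
 * offset is 0xFFFFFFFFFF00 and a later read returns 0x100, the counter
 * rolled over, so the reported value is (0x100 + 2^48) - 0xFFFFFFFFFF00 =
 * 0x200 counts since the offset was taken; the final mask keeps the result
 * within 48 bits.
 */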

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u32 new_data;

        new_data = rd32(hw, reg);
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
                *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
        u32 new_data = rd32(hw, reg);

        wr32(hw, reg, 1); /* must write a nonzero value to clear register */
        *stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
        int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */

        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);
        i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);

        i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        struct i40e_veb_tc_stats *veb_oes;
        struct i40e_veb_tc_stats *veb_es;
        int i, idx = 0;

        idx = veb->stats_idx;
        es = &veb->stats;
        oes = &veb->stats_offsets;
        veb_es = &veb->tc_stats;
        veb_oes = &veb->tc_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_discards, &es->tx_discards);
        if (hw->revision_id > 0)
                i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
                                   veb->stat_offsets_loaded,
                                   &oes->rx_unknown_protocol,
                                   &es->rx_unknown_protocol);
        i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
                                   I40E_GLVEBTC_RPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_packets[i],
                                   &veb_es->tc_rx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
                                   I40E_GLVEBTC_RBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_bytes[i],
                                   &veb_es->tc_rx_bytes[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
                                   I40E_GLVEBTC_TPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_packets[i],
                                   &veb_es->tc_tx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
                                   I40E_GLVEBTC_TBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_bytes[i],
                                   &veb_es->tc_tx_bytes[i]);
        }
        veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        unsigned int start;
        u64 tx_linearize;
        u64 tx_force_wb;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;

        if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        ons = &vsi->net_stats_offsets;
        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the netdev and vsi stats that the driver collects
         * on the fly during packet processing
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = ACCESS_ONCE(vsi->tx_rings[q]);

                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;

                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_force_wb = tx_force_wb;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;

        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
        ns->tx_packets = tx_p;
        ns->tx_bytes = tx_b;

        /* update netdev stats from eth stats */
        i40e_update_eth_stats(vsi);
        ons->tx_errors = oes->tx_errors;
        ns->tx_errors = es->tx_errors;
        ons->multicast = oes->rx_multicast;
        ns->multicast = es->rx_multicast;
        ons->rx_dropped = oes->rx_discards;
        ns->rx_dropped = es->rx_discards;
        ons->tx_dropped = oes->tx_discards;
        ns->tx_dropped = es->tx_discards;

        /* pull in a couple PF stats if this is the main vsi */
        if (vsi == pf->vsi[pf->lan_vsi]) {
                ns->rx_crc_errors = pf->stats.crc_errors;
                ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
                ns->rx_length_errors = pf->stats.rx_length_errors;
        }
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
        int i;

        i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                           I40E_GLPRT_GORCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
        i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                           I40E_GLPRT_GOTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
        i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
        i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_unicast,
                           &nsd->eth.rx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                           I40E_GLPRT_MPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_multicast,
                           &nsd->eth.rx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
                           I40E_GLPRT_BPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_broadcast,
                           &nsd->eth.rx_broadcast);
        i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
                           I40E_GLPRT_UPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_unicast,
                           &nsd->eth.tx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
                           I40E_GLPRT_MPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_multicast,
                           &nsd->eth.tx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
                           I40E_GLPRT_BPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_broadcast,
                           &nsd->eth.tx_broadcast);

        i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_dropped_link_down,
                           &nsd->tx_dropped_link_down);

        i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->crc_errors, &nsd->crc_errors);

        i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->illegal_bytes, &nsd->illegal_bytes);

        i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_local_faults,
                           &nsd->mac_local_faults);
        i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_remote_faults,
                           &nsd->mac_remote_faults);

        i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_length_errors,
                           &nsd->rx_length_errors);

        i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_rx, &nsd->link_xon_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);

        for (i = 0; i < 8; i++) {
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                                   &nsd->priority_xoff_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
                                   &nsd->priority_xon_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_tx[i],
                                   &nsd->priority_xon_tx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_tx[i],
                                   &nsd->priority_xoff_tx[i]);
                i40e_stat_update32(hw,
                                   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_2_xoff[i],
                                   &nsd->priority_xon_2_xoff[i]);
        }

        i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                           I40E_GLPRT_PRC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_64, &nsd->rx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                           I40E_GLPRT_PRC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_127, &nsd->rx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                           I40E_GLPRT_PRC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_255, &nsd->rx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                           I40E_GLPRT_PRC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_511, &nsd->rx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                           I40E_GLPRT_PRC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1023, &nsd->rx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                           I40E_GLPRT_PRC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1522, &nsd->rx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                           I40E_GLPRT_PRC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_big, &nsd->rx_size_big);

        i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                           I40E_GLPRT_PTC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_64, &nsd->tx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                           I40E_GLPRT_PTC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_127, &nsd->tx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                           I40E_GLPRT_PTC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_255, &nsd->tx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                           I40E_GLPRT_PTC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_511, &nsd->tx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                           I40E_GLPRT_PTC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1023, &nsd->tx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                           I40E_GLPRT_PTC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1522, &nsd->tx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                           I40E_GLPRT_PTC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_big, &nsd->tx_size_big);

        i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_undersize, &nsd->rx_undersize);
        i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_fragments, &nsd->rx_fragments);
        i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_oversize, &nsd->rx_oversize);
        i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_jabber, &nsd->rx_jabber);

        /* FDIR stats */
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
                        &nsd->fd_sb_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_tunnel_match);

        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
        nsd->rx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
        i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
                           pf->stat_offsets_loaded,
                           &osd->tx_lpi_count, &nsd->tx_lpi_count);
        i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);

        if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
            !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
                nsd->fd_sb_status = true;
        else
                nsd->fd_sb_status = false;

        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
            !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
                nsd->fd_atr_status = true;
        else
                nsd->fd_atr_status = false;

        pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_update_pf_stats(pf);

        i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
                                                const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (vlan == f->vlan))
                        return f;
        }
        return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)))
                        return f;
        }
        return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
        /* If we have a PVID, always operate in VLAN mode */
        if (vsi->info.pvid)
                return true;

        /* We need to operate in VLAN mode whenever we have any filters with
         * a VLAN other than I40E_VLAN_ANY. We could check the table each
         * time, incurring search cost repeatedly. However, we can notice two
         * things:
         *
         * 1) the only place where we can gain a VLAN filter is in
         *    i40e_add_filter.
         *
         * 2) the only place where filters are actually removed is in
         *    i40e_sync_filters_subtask.
         *
         * Thus, we can simply use a boolean value, has_vlan_filter, which we
         * set to true when we add a VLAN filter in i40e_add_filter. Then
         * we have to perform the full search after deleting filters in
         * i40e_sync_filters_subtask, but we already have to search
         * filters here and can perform the check at the same time. This
         * results in avoiding embedding a loop for VLAN mode inside another
         * loop over all the filters, and should maintain correctness as noted
         * above.
         */
        return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
                                         struct hlist_head *tmp_add_list,
                                         struct hlist_head *tmp_del_list,
                                         int vlan_filters)
{
        s16 pvid = le16_to_cpu(vsi->info.pvid);
        struct i40e_mac_filter *f, *add_head;
        struct i40e_new_mac_filter *new;
        struct hlist_node *h;
        int bkt, new_vlan;

        /* To determine if a particular filter needs to be replaced we
         * have the three following conditions:
         *
         * a) if we have a PVID assigned, then all filters which are
         *    not marked as VLAN=PVID must be replaced with filters that
         *    are.
         * b) otherwise, if we have any active VLANS, all filters
         *    which are marked as VLAN=-1 must be replaced with
         *    filters marked as VLAN=0
         * c) finally, if we do not have any active VLANS, all filters
         *    which are marked as VLAN=0 must be replaced with filters
         *    marked as VLAN=-1
         */

        /* Update the filters about to be added in place */
        hlist_for_each_entry(new, tmp_add_list, hlist) {
                if (pvid && new->f->vlan != pvid)
                        new->f->vlan = pvid;
                else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
                        new->f->vlan = 0;
                else if (!vlan_filters && new->f->vlan == 0)
                        new->f->vlan = I40E_VLAN_ANY;
        }

        /* Update the remaining active filters */
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                /* Combine the checks for whether a filter needs to be changed
                 * and then determine the new VLAN inside the if block, in
                 * order to avoid duplicating code for adding the new filter
                 * then deleting the old filter.
                 */
                if ((pvid && f->vlan != pvid) ||
                    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
                    (!vlan_filters && f->vlan == 0)) {
                        /* Determine the new vlan we will be adding */
                        if (pvid)
                                new_vlan = pvid;
                        else if (vlan_filters)
                                new_vlan = 0;
                        else
                                new_vlan = I40E_VLAN_ANY;

                        /* Create the new filter */
                        add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
                        if (!add_head)
                                return -ENOMEM;

                        /* Create a temporary i40e_new_mac_filter */
                        new = kzalloc(sizeof(*new), GFP_ATOMIC);
                        if (!new)
                                return -ENOMEM;

                        new->f = add_head;
                        new->state = add_head->state;

                        /* Add the new filter to the tmp list */
                        hlist_add_head(&new->hlist, tmp_add_list);

                        /* Put the original filter into the delete list */
                        f->state = I40E_FILTER_REMOVE;
                        hash_del(&f->hlist);
                        hlist_add_head(&f->hlist, tmp_del_list);
                }
        }

        vsi->has_vlan_filter = !!vlan_filters;

        return 0;
}
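
/* Illustrative example (editorial, not driver code): with no PVID and an
 * existing untagged filter {MAC A, vlan = -1}, adding the first VLAN
 * filter {MAC A, vlan = 5} makes vlan_filters nonzero, so the vlan = -1
 * entry is queued for deletion and re-added as {MAC A, vlan = 0}; the VSI
 * then receives VLAN 5 traffic plus untagged traffic only. Removing the
 * last VLAN filter reverses this, restoring the catch-all vlan = -1 entry.
 */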

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;

        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
                return;

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* Ignore error returns, some firmware does it this way... */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* ...and some firmware does it this way. */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        f = i40e_find_filter(vsi, macaddr, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return NULL;

                /* Update the boolean indicating if we need to function in
                 * VLAN mode.
                 */
                if (vlan >= 0)
                        vsi->has_vlan_filter = true;

                ether_addr_copy(f->macaddr, macaddr);
                f->vlan = vlan;
                /* If we're in overflow promisc mode, set the state directly
                 * to failed, so we don't bother to try sending the filter
                 * to the hardware.
                 */
                if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
                        f->state = I40E_FILTER_FAILED;
                else
                        f->state = I40E_FILTER_NEW;
                INIT_HLIST_NODE(&f->hlist);

                key = i40e_addr_to_hkey(macaddr);
                hash_add(vsi->mac_filter_hash, &f->hlist, key);

                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }

        /* If we're asked to add a filter that has been marked for removal, it
         * is safe to simply restore it to active state. __i40e_del_filter
         * will have simply deleted any filters which were previously marked
         * NEW or FAILED, so if it is currently marked REMOVE it must have
         * previously been ACTIVE. Since we haven't yet run the sync filters
         * task, just restore this filter to the ACTIVE state so that the
         * sync task leaves it in place
         */
        if (f->state == I40E_FILTER_REMOVE)
                f->state = I40E_FILTER_ACTIVE;

        return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
        if (!f)
                return;

        /* If the filter was never added to firmware then we can just delete it
         * directly and we don't want to set the status to remove or else an
         * admin queue command will unnecessarily fire.
         */
        if ((f->state == I40E_FILTER_FAILED) ||
            (f->state == I40E_FILTER_NEW)) {
                hash_del(&f->hlist);
                kfree(f);
        } else {
                f->state = I40E_FILTER_REMOVE;
        }

        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
        vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return;

        f = i40e_find_filter(vsi, macaddr, vlan);
        __i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
                                            const u8 *macaddr)
{
        struct i40e_mac_filter *f, *add = NULL;
        struct hlist_node *h;
        int bkt;

        if (vsi->info.pvid)
                return i40e_add_filter(vsi, macaddr,
                                       le16_to_cpu(vsi->info.pvid));

        if (!i40e_is_vsi_in_vlan(vsi))
                return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (f->state == I40E_FILTER_REMOVE)
                        continue;
                add = i40e_add_filter(vsi, macaddr, f->vlan);
                if (!add)
                        return NULL;
        }

        return add;
}
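
/* Illustrative example (editorial, not driver code): on a VSI already
 * carrying filters for VLANs 10 and 20, i40e_add_mac_filter(vsi, mac)
 * creates two entries, {mac, 10} and {mac, 20}; on a VSI with PVID 5 it
 * creates only {mac, 5}; and on a VSI with no VLAN filters at all it
 * creates the single catch-all entry {mac, I40E_VLAN_ANY}.
 */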

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;
        bool found = false;
        int bkt;

        WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
             "Missing mac_filter_hash_lock\n");
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (ether_addr_equal(macaddr, f->macaddr)) {
                        __i40e_del_filter(vsi, f);
                        found = true;
                }
        }

        if (found)
                return 0;
        else
                return -ENOENT;
}

41c445ff
JB
1533/**
1534 * i40e_set_mac - NDO callback to set mac address
1535 * @netdev: network interface device structure
1536 * @p: pointer to an address structure
1537 *
1538 * Returns 0 on success, negative on failure
1539 **/
1540static int i40e_set_mac(struct net_device *netdev, void *p)
1541{
1542 struct i40e_netdev_priv *np = netdev_priv(netdev);
1543 struct i40e_vsi *vsi = np->vsi;
30650cc5
SN
1544 struct i40e_pf *pf = vsi->back;
1545 struct i40e_hw *hw = &pf->hw;
41c445ff 1546 struct sockaddr *addr = p;
41c445ff
JB
1547
1548 if (!is_valid_ether_addr(addr->sa_data))
1549 return -EADDRNOTAVAIL;
1550
30650cc5
SN
1551 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1552 netdev_info(netdev, "already using mac address %pM\n",
1553 addr->sa_data);
1554 return 0;
1555 }
41c445ff 1556
0da36b97
JK
1557 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
1558 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
80f6428f
ASJ
1559 return -EADDRNOTAVAIL;
1560
30650cc5
SN
1561 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1562 netdev_info(netdev, "returning to hw mac address %pM\n",
1563 hw->mac.addr);
1564 else
1565 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1566
278e7d0b 1567 spin_lock_bh(&vsi->mac_filter_hash_lock);
feffdbe4
JK
1568 i40e_del_mac_filter(vsi, netdev->dev_addr);
1569 i40e_add_mac_filter(vsi, addr->sa_data);
278e7d0b 1570 spin_unlock_bh(&vsi->mac_filter_hash_lock);
c3c7ea27 1571 ether_addr_copy(netdev->dev_addr, addr->sa_data);
41c445ff
JB
1572 if (vsi->type == I40E_VSI_MAIN) {
1573 i40e_status ret;
6995b36c 1574
41c445ff 1575 ret = i40e_aq_mac_address_write(&vsi->back->hw,
cc41222c 1576 I40E_AQC_WRITE_TYPE_LAA_WOL,
41c445ff 1577 addr->sa_data, NULL);
c3c7ea27
MW
1578 if (ret)
1579 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1580 i40e_stat_str(hw, ret),
1581 i40e_aq_str(hw, hw->aq.asq_last_status));
30650cc5
SN
1582 }
1583
c53934c6
JB
1584 /* schedule our worker thread which will take care of
1585 * applying the new filter changes
1586 */
1587 i40e_service_event_schedule(vsi->back);
1588 return 0;
41c445ff
JB
1589}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
			    cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
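
/* Illustration only (not driver code): a minimal userspace sketch of the
 * qmap encoding above. The queue-count field stores log2 of the count
 * rounded up to the next power of two; the shift values below merely
 * mirror the driver's I40E_AQ_VSI_TC_QUE_* definitions and are
 * assumptions here. Guarded with #if 0 so it cannot affect this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TC_QUE_OFFSET_SHIFT 0 /* assumed stand-in for I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT */
#define TC_QUE_NUMBER_SHIFT 9 /* assumed stand-in for I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT */

static uint16_t build_qmap(uint16_t offset, uint16_t qcount)
{
	int pow = 0;

	/* next-higher power-of-2 exponent, same loop shape as above */
	while (qcount && ((1ULL << pow) < qcount))
		pow++;

	return (uint16_t)((offset << TC_QUE_OFFSET_SHIFT) |
			  (pow << TC_QUE_NUMBER_SHIFT));
}

int main(void)
{
	/* 8 queues starting at offset 16 -> pow = 3 */
	printf("qmap = 0x%04x\n", build_qmap(16, 8));
	return 0;
}
#endif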

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}
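
/* Illustration only (not driver code): a self-contained sketch of the
 * pattern these undo helpers support - detach entries to a temporary list
 * under the lock, process them with the lock dropped, and splice them back
 * if processing fails. Names are invented for the sketch; guarded with
 * #if 0 so it cannot affect this file.
 */
#if 0
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* pop every node from *src onto *tmp (the "under lock" step) */
static void detach_all(struct node **src, struct node **tmp)
{
	while (*src) {
		struct node *n = *src;

		*src = n->next;
		n->next = *tmp;
		*tmp = n;
	}
}

/* undo: push everything on *tmp back onto *src */
static void restore_all(struct node **src, struct node **tmp)
{
	detach_all(tmp, src);
}

int main(void)
{
	struct node c = { 3, 0 }, b = { 2, &c }, a = { 1, &b };
	struct node *list = &a, *tmp = 0;

	detach_all(&list, &tmp);   /* process tmp without the lock... */
	restore_all(&list, &tmp);  /* ...and undo on failure */
	for (struct node *n = list; n; n = n->next)
		printf("%d ", n->val); /* prints 1 2 3 */
	printf("\n");
	return 0;
}
#endif
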
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 */
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
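
/* Illustration only (not driver code): a tiny userspace model of the
 * "pre-set a sentinel, detect overwrite" trick used above. Firmware
 * overwrites match_method for filters it programs, so any slot still
 * holding the sentinel failed. Constants are invented for the sketch;
 * guarded with #if 0 so it cannot affect this file.
 */
#if 0
#include <stdio.h>

#define MM_SENTINEL 0xff /* stands in for I40E_AQC_MM_ERR_NO_RES */

int main(void)
{
	unsigned char match_method[4] = {
		MM_SENTINEL, MM_SENTINEL, MM_SENTINEL, MM_SENTINEL
	};
	int ok = 0, i;

	/* "firmware" programs slots 0 and 2 and overwrites the sentinel */
	match_method[0] = 1;
	match_method[2] = 1;

	for (i = 0; i < 4; i++)
		if (match_method[i] != MM_SENTINEL)
			ok++;

	printf("%d of 4 filters active\n", ok); /* prints 2 of 4 */
	return 0;
}
#endif
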
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force *retval
 * to be set to 0. This ensures that a sequence of calls to this function
 * preserves the previous value of *retval on successful delete.
 */
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true on exit if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * *promisc_changed to true if the firmware has run out of space for more
 * filters.
 */
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}

/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: name to display in messages
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret)
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s setting broadcast promiscuous mode on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);

	return aq_ret;
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and release filter list.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
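
/* Illustration only (not driver code): the overflow-promiscuous exit
 * threshold above in isolation. After an overflow, the VSI may leave
 * promiscuous mode only once the filter count drops below 3/4 of the
 * count measured when overflow occurred and no filters are failed.
 * Guarded with #if 0 so it cannot affect this file.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool can_exit_overflow_promisc(unsigned int active_at_overflow,
				      unsigned int active_now,
				      unsigned int failed_now)
{
	unsigned int threshold = (active_at_overflow * 3) / 4;

	return !failed_now && active_now < threshold;
}

int main(void)
{
	/* overflowed at 100 filters: may exit below 75, with none failed */
	printf("%d\n", can_exit_overflow_promisc(100, 80, 0)); /* 0 */
	printf("%d\n", can_exit_overflow_promisc(100, 70, 0)); /* 1 */
	return 0;
}
#endif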

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}

/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}
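
/* Illustration only (not driver code): the wire frame size computed from
 * an MTU above. The header constants mirror the kernel's ETH_HLEN (14),
 * ETH_FCS_LEN (4) and VLAN_HLEN (4). Guarded with #if 0 so it cannot
 * affect this file.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int new_mtu = 3000;
	int frame_size = new_mtu + 14 + 4 + 4; /* L2 header + FCS + VLAN tag */

	/* with 4K pages, XDP allows up to a 3072-byte buffer */
	printf("frame %d, fits: %s\n", frame_size,
	       frame_size <= 3072 ? "yes" : "no");
	return 0;
}
#endif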

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 */
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}

/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	int cpu;

	if (!ring->q_vector || !ring->netdev)
		return;

	if ((vsi->tc_config.numtc <= 1) &&
	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
		cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
		netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
				    ring->queue_index);
	}
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
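
/* Illustration only (not driver code): the shift/mask packing used for
 * qtx_ctl above, with invented field values (the real shifts and masks
 * come from the I40E_QTX_CTL_* register definitions). Guarded with
 * #if 0 so it cannot affect this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PF_INDX_SHIFT 2          /* assumed stand-in for I40E_QTX_CTL_PF_INDX_SHIFT */
#define PF_INDX_MASK  (0xf << 2) /* assumed stand-in for I40E_QTX_CTL_PF_INDX_MASK */

int main(void)
{
	uint32_t qtx_ctl = 0x2; /* queue owner type, e.g. "PF queue" */
	uint32_t pf_id = 5;

	/* shift the field into place, then mask off anything out of range */
	qtx_ctl |= (pf_id << PF_INDX_SHIFT) & PF_INDX_MASK;
	printf("qtx_ctl = 0x%08x\n", qtx_ctl);
	return 0;
}
#endif
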
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
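
/* Illustration only (not driver code): the two Rx context calculations
 * above. dbuff is the buffer size in 128-byte units (rounded up), and
 * rxmax caps the receive frame to whichever is smaller - the VSI's max
 * frame or the buffer chain capacity. DBUFF_SHIFT below mirrors
 * I40E_RXQ_CTX_DBUFF_SHIFT. Guarded with #if 0 so it cannot affect
 * this file.
 */
#if 0
#include <stdio.h>

#define DBUFF_SHIFT 7 /* buffer size granularity: 1 << 7 = 128 bytes */

int main(void)
{
	unsigned int rx_buf_len = 2048, max_frame = 9728, chain_len = 5;
	unsigned int dbuff = (rx_buf_len + (1u << DBUFF_SHIFT) - 1) >> DBUFF_SHIFT;
	unsigned int cap = chain_len * rx_buf_len;
	unsigned int rxmax = max_frame < cap ? max_frame : cap;

	printf("dbuff=%u (x128B), rxmax=%u\n", dbuff, rxmax);
	return 0;
}
#endif
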
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
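
/* Illustration only (not driver code): tagging a flat queue array with
 * per-TC (offset, count) ranges as done above. Values are invented for
 * the sketch. Guarded with #if 0 so it cannot affect this file.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int dcb_tc[8] = { 0 };
	/* two TCs: TC0 owns queues 0-3, TC1 owns queues 4-7 */
	int qoffset[2] = { 0, 4 };
	int qcount[2] = { 4, 4 };
	int n, i;

	for (n = 0; n < 2; n++)
		for (i = qoffset[n]; i < qoffset[n] + qcount[n]; i++)
			dcb_tc[i] = n;

	for (i = 0; i < 8; i++)
		printf("queue %d -> TC %d\n", i, dcb_tc[i]);
	return 0;
}
#endif
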
/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}

/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

3226/**
3227 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3228 * @vsi: the VSI being configured
3229 **/
3230static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3231{
74608d17 3232 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
41c445ff 3233 struct i40e_pf *pf = vsi->back;
41c445ff
JB
3234 struct i40e_hw *hw = &pf->hw;
3235 u16 vector;
3236 int i, q;
41c445ff
JB
3237 u32 qp;
3238
3239 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3240 * and PFINT_LNKLSTn registers, e.g.:
3241 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3242 */
3243 qp = vsi->base_queue;
3244 vector = vsi->base_vector;
493fb300 3245 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
ac26fc13
JB
3246 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3247
ee2319cf 3248 q_vector->itr_countdown = ITR_COUNTDOWN_START;
a75e8005 3249 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
41c445ff
JB
3250 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3251 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3252 q_vector->rx.itr);
 3253 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3254 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3255 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3256 q_vector->tx.itr);
 3257 wr32(hw, I40E_PFINT_RATEN(vector - 1),
 3258 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3259
3260 /* Linked list for the queuepairs assigned to this vector */
3261 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3262 for (q = 0; q < q_vector->num_ringpairs; q++) {
 3263 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3264 u32 val;
3265
 3266 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3267 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3268 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3269 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3270 (I40E_QUEUE_TYPE_TX <<
3271 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3272
3273 wr32(hw, I40E_QINT_RQCTL(qp), val);
3274
3275 if (has_xdp) {
3276 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3277 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3278 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3279 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3280 (I40E_QUEUE_TYPE_TX <<
3281 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3282
3283 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3284 }
3285
 3286 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3287 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3288 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3289 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3290 (I40E_QUEUE_TYPE_RX <<
3291 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3292
3293 /* Terminate the linked list */
3294 if (q == (q_vector->num_ringpairs - 1))
3295 val |= (I40E_QUEUE_END_OF_LIST <<
3296 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3297
3298 wr32(hw, I40E_QINT_TQCTL(qp), val);
3299 qp++;
3300 }
3301 }
3302
3303 i40e_flush(hw);
3304}
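/* Editor's note: i40e_vsi_configure_msix() above chains each vector's
 * queues through the NEXTQ fields: Rx(qp) -> Tx(qp) -> Rx(qp+1) -> ...
 * (plus an extra XDP Tx hop when enabled), terminated by the end-of-list
 * sentinel. A hypothetical stand-alone sketch of the non-XDP chain order,
 * with a made-up sentinel value (illustration only):
 */
#if 0
#define DEMO_EOL 0x7FF	/* stand-in for I40E_QUEUE_END_OF_LIST */

static void demo_print_chain(int first_qp, int num_ringpairs)
{
	int q, qp = first_qp;

	for (q = 0; q < num_ringpairs; q++, qp++)
		pr_info("hop %d: rx[%d] -> tx[%d]\n", q, qp, qp);
	pr_info("tail: tx[%d] -> EOL(0x%x)\n", qp - 1, DEMO_EOL);
}
#endif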
3305
3306/**
3307 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3308 * @hw: ptr to the hardware info
3309 **/
 3310static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
 3311{
 3312 struct i40e_hw *hw = &pf->hw;
3313 u32 val;
3314
3315 /* clear things first */
3316 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3317 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3318
3319 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3320 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3321 I40E_PFINT_ICR0_ENA_GRST_MASK |
3322 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3323 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3324 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3325 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3326 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3327
3328 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3329 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3330
3331 if (pf->flags & I40E_FLAG_PTP)
3332 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3333
3334 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3335
3336 /* SW_ITR_IDX = 0, but don't change INTENA */
3337 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3338 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3339
3340 /* OTHER_ITR_IDX = 0 */
3341 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3342}
3343
3344/**
3345 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3346 * @vsi: the VSI being configured
3347 **/
3348static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3349{
 3350 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
 3351 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3352 struct i40e_pf *pf = vsi->back;
3353 struct i40e_hw *hw = &pf->hw;
3354 u32 val;
3355
3356 /* set the ITR configuration */
 3357 q_vector->itr_countdown = ITR_COUNTDOWN_START;
 3358 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3359 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3360 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
 3361 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3362 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3363 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3364
 3365 i40e_enable_misc_int_causes(pf);
3366
3367 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3368 wr32(hw, I40E_PFINT_LNKLST0, 0);
3369
 3370 /* Associate the queue pair to the vector and enable the queue int */
3371 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3372 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3373 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3374 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3375
3376 wr32(hw, I40E_QINT_RQCTL(0), val);
3377
3378 if (i40e_enabled_xdp_vsi(vsi)) {
3379 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3380 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3381 (I40E_QUEUE_TYPE_TX
3382 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3383
3384 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3385 }
3386
3387 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3388 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3389 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3390
3391 wr32(hw, I40E_QINT_TQCTL(0), val);
3392 i40e_flush(hw);
3393}
3394
3395/**
3396 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3397 * @pf: board private structure
3398 **/
3399void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3400{
3401 struct i40e_hw *hw = &pf->hw;
3402
3403 wr32(hw, I40E_PFINT_DYN_CTL0,
3404 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3405 i40e_flush(hw);
3406}
3407
 3408/**
 3409 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 3410 * @pf: board private structure
 3411 * @clearpba: true when all pending interrupt events should be cleared
 3412 **/
 3413void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3414{
3415 struct i40e_hw *hw = &pf->hw;
3416 u32 val;
3417
3418 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
 3419 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3420 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3421
3422 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3423 i40e_flush(hw);
3424}
3425
3426/**
3427 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3428 * @irq: interrupt number
3429 * @data: pointer to a q_vector
3430 **/
3431static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3432{
3433 struct i40e_q_vector *q_vector = data;
3434
 3435 if (!q_vector->tx.ring && !q_vector->rx.ring)
3436 return IRQ_HANDLED;
3437
 3438 napi_schedule_irqoff(&q_vector->napi);
3439
3440 return IRQ_HANDLED;
3441}
3442
3443/**
3444 * i40e_irq_affinity_notify - Callback for affinity changes
3445 * @notify: context as to what irq was changed
3446 * @mask: the new affinity mask
3447 *
3448 * This is a callback function used by the irq_set_affinity_notifier function
3449 * so that we may register to receive changes to the irq affinity masks.
3450 **/
3451static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3452 const cpumask_t *mask)
3453{
3454 struct i40e_q_vector *q_vector =
3455 container_of(notify, struct i40e_q_vector, affinity_notify);
3456
 3457 cpumask_copy(&q_vector->affinity_mask, mask);
3458}
3459
3460/**
3461 * i40e_irq_affinity_release - Callback for affinity notifier release
3462 * @ref: internal core kernel usage
3463 *
3464 * This is a callback function used by the irq_set_affinity_notifier function
3465 * to inform the current notification subscriber that they will no longer
3466 * receive notifications.
3467 **/
3468static void i40e_irq_affinity_release(struct kref *ref) {}
3469
3470/**
3471 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3472 * @vsi: the VSI being configured
3473 * @basename: name for the vector
3474 *
3475 * Allocates MSI-X vectors and requests interrupts from the kernel.
3476 **/
3477static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3478{
3479 int q_vectors = vsi->num_q_vectors;
3480 struct i40e_pf *pf = vsi->back;
3481 int base = vsi->base_vector;
3482 int rx_int_idx = 0;
3483 int tx_int_idx = 0;
3484 int vector, err;
 3485 int irq_num;
 3486 int cpu;
3487
3488 for (vector = 0; vector < q_vectors; vector++) {
 3489 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 3490
3491 irq_num = pf->msix_entries[base + vector].vector;
3492
 3493 if (q_vector->tx.ring && q_vector->rx.ring) {
3494 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3495 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3496 tx_int_idx++;
 3497 } else if (q_vector->rx.ring) {
3498 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3499 "%s-%s-%d", basename, "rx", rx_int_idx++);
 3500 } else if (q_vector->tx.ring) {
3501 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3502 "%s-%s-%d", basename, "tx", tx_int_idx++);
3503 } else {
3504 /* skip this unused q_vector */
3505 continue;
3506 }
 3507 err = request_irq(irq_num,
3508 vsi->irq_handler,
3509 0,
3510 q_vector->name,
3511 q_vector);
3512 if (err) {
3513 dev_info(&pf->pdev->dev,
 3514 "MSIX request_irq failed, error: %d\n", err);
3515 goto free_queue_irqs;
3516 }
3517
3518 /* register for affinity change notifications */
3519 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3520 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3521 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3522 /* Spread affinity hints out across online CPUs.
3523 *
3524 * get_cpu_mask returns a static constant mask with
3525 * a permanent lifetime so it's ok to pass to
3526 * irq_set_affinity_hint without making a copy.
 3527 */
3528 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3529 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3530 }
3531
 3532 vsi->irqs_ready = true;
3533 return 0;
3534
3535free_queue_irqs:
3536 while (vector) {
3537 vector--;
3538 irq_num = pf->msix_entries[base + vector].vector;
3539 irq_set_affinity_notifier(irq_num, NULL);
3540 irq_set_affinity_hint(irq_num, NULL);
3541 free_irq(irq_num, &vsi->q_vectors[vector]);
3542 }
3543 return err;
3544}
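/* Editor's sketch: the affinity hints above spread vectors across online
 * CPUs with cpumask_local_spread(); with 4 online CPUs, 6 vectors map to
 * CPUs 0,1,2,3,0,1. Hypothetical demo (illustration only):
 */
#if 0
static void demo_affinity_spread(int num_vectors)
{
	int v;

	for (v = 0; v < num_vectors; v++)
		pr_info("vector %d -> cpu %d\n",
			v, cpumask_local_spread(v, -1 /* any NUMA node */));
}
#endif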
3545
3546/**
3547 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3548 * @vsi: the VSI being un-configured
3549 **/
3550static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3551{
3552 struct i40e_pf *pf = vsi->back;
3553 struct i40e_hw *hw = &pf->hw;
3554 int base = vsi->base_vector;
3555 int i;
3556
 3557 /* disable interrupt causation from each queue */
 3558 for (i = 0; i < vsi->num_queue_pairs; i++) {
3559 u32 val;
3560
3561 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3562 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3563 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3564
3565 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3566 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3567 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3568
3569 if (!i40e_enabled_xdp_vsi(vsi))
3570 continue;
3571 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3572 }
3573
 3574 /* disable each interrupt */
3575 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3576 for (i = vsi->base_vector;
3577 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3578 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3579
3580 i40e_flush(hw);
3581 for (i = 0; i < vsi->num_q_vectors; i++)
3582 synchronize_irq(pf->msix_entries[i + base].vector);
3583 } else {
3584 /* Legacy and MSI mode - this stops all interrupt handling */
3585 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3586 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3587 i40e_flush(hw);
3588 synchronize_irq(pf->pdev->irq);
3589 }
3590}
3591
3592/**
3593 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3594 * @vsi: the VSI being configured
3595 **/
3596static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3597{
3598 struct i40e_pf *pf = vsi->back;
3599 int i;
3600
3601 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 3602 for (i = 0; i < vsi->num_q_vectors; i++)
3603 i40e_irq_dynamic_enable(vsi, i);
3604 } else {
 3605 i40e_irq_dynamic_enable_icr0(pf, true);
3606 }
3607
 3608 i40e_flush(&pf->hw);
3609 return 0;
3610}
3611
3612/**
 3613 * i40e_free_misc_vector - Free the vector that handles non-queue events
3614 * @pf: board private structure
3615 **/
 3616static void i40e_free_misc_vector(struct i40e_pf *pf)
3617{
3618 /* Disable ICR 0 */
3619 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3620 i40e_flush(&pf->hw);
3621
3622 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3623 synchronize_irq(pf->msix_entries[0].vector);
3624 free_irq(pf->msix_entries[0].vector, pf);
3625 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3626 }
3627}
3628
3629/**
3630 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3631 * @irq: interrupt number
3632 * @data: pointer to a q_vector
3633 *
3634 * This is the handler used for all MSI/Legacy interrupts, and deals
3635 * with both queue and non-queue interrupts. This is also used in
3636 * MSIX mode to handle the non-queue interrupts.
3637 **/
3638static irqreturn_t i40e_intr(int irq, void *data)
3639{
3640 struct i40e_pf *pf = (struct i40e_pf *)data;
3641 struct i40e_hw *hw = &pf->hw;
 3642 irqreturn_t ret = IRQ_NONE;
3643 u32 icr0, icr0_remaining;
3644 u32 val, ena_mask;
3645
3646 icr0 = rd32(hw, I40E_PFINT_ICR0);
 3647 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 3648
3649 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3650 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
 3651 goto enable_intr;
 3652
3653 /* if interrupt but no bits showing, must be SWINT */
3654 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3655 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3656 pf->sw_int_count++;
3657
 3658 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
 3659 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
 3660 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
 3661 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
 3662 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3663 }
3664
3665 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3666 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3667 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3668 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 3669
 3670 /* We do not have a way to disarm queue causes while leaving
 3671 * the interrupt enabled for all other causes; ideally the
 3672 * interrupt would be disabled while we are in NAPI, but
 3673 * this is not a performance path and napi_schedule()
 3674 * can deal with rescheduling.
3675 */
 3676 if (!test_bit(__I40E_DOWN, pf->state))
 3677 napi_schedule_irqoff(&q_vector->napi);
3678 }
3679
3680 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3681 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 3682 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
 3683 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3684 }
3685
3686 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3687 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
 3688 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3689 }
3690
3691 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3692 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
 3693 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3694 }
3695
3696 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3697 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3698 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3699 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3700 val = rd32(hw, I40E_GLGEN_RSTAT);
3701 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3702 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
 3703 if (val == I40E_RESET_CORER) {
 3704 pf->corer_count++;
 3705 } else if (val == I40E_RESET_GLOBR) {
 3706 pf->globr_count++;
 3707 } else if (val == I40E_RESET_EMPR) {
 3708 pf->empr_count++;
 3709 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
 3710 }
3711 }
3712
3713 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3714 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3715 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3716 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3717 rd32(hw, I40E_PFHMC_ERRORINFO),
3718 rd32(hw, I40E_PFHMC_ERRORDATA));
3719 }
3720
3721 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3722 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3723
3724 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
 3725 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 3726 i40e_ptp_tx_hwtstamp(pf);
 3727 }
3728 }
3729
3730 /* If a critical error is pending we have no choice but to reset the
3731 * device.
3732 * Report and mask out any remaining unexpected interrupts.
3733 */
3734 icr0_remaining = icr0 & ena_mask;
3735 if (icr0_remaining) {
3736 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3737 icr0_remaining);
 3738 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
 3739 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
 3740 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
 3741 dev_info(&pf->pdev->dev, "device will be reset\n");
 3742 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 3743 i40e_service_event_schedule(pf);
3744 }
3745 ena_mask &= ~icr0_remaining;
3746 }
 3747 ret = IRQ_HANDLED;
 3748
 3749enable_intr:
3750 /* re-enable interrupt causes */
3751 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
 3752 if (!test_bit(__I40E_DOWN, pf->state)) {
 3753 i40e_service_event_schedule(pf);
 3754 i40e_irq_dynamic_enable_icr0(pf, false);
3755 }
3756
 3757 return ret;
3758}
3759
3760/**
3761 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3762 * @tx_ring: tx ring to clean
3763 * @budget: how many cleans we're allowed
3764 *
 3765 * Returns true if there's any budget left (i.e. the clean is finished)
3766 **/
3767static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3768{
3769 struct i40e_vsi *vsi = tx_ring->vsi;
3770 u16 i = tx_ring->next_to_clean;
3771 struct i40e_tx_buffer *tx_buf;
3772 struct i40e_tx_desc *tx_desc;
3773
3774 tx_buf = &tx_ring->tx_bi[i];
3775 tx_desc = I40E_TX_DESC(tx_ring, i);
3776 i -= tx_ring->count;
3777
3778 do {
3779 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3780
3781 /* if next_to_watch is not set then there is no work pending */
3782 if (!eop_desc)
3783 break;
3784
3785 /* prevent any other reads prior to eop_desc */
3786 read_barrier_depends();
3787
3788 /* if the descriptor isn't done, no work yet to do */
3789 if (!(eop_desc->cmd_type_offset_bsz &
3790 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3791 break;
3792
3793 /* clear next_to_watch to prevent false hangs */
3794 tx_buf->next_to_watch = NULL;
3795
3796 tx_desc->buffer_addr = 0;
3797 tx_desc->cmd_type_offset_bsz = 0;
3798 /* move past filter desc */
3799 tx_buf++;
3800 tx_desc++;
3801 i++;
3802 if (unlikely(!i)) {
3803 i -= tx_ring->count;
3804 tx_buf = tx_ring->tx_bi;
3805 tx_desc = I40E_TX_DESC(tx_ring, 0);
3806 }
3807 /* unmap skb header data */
3808 dma_unmap_single(tx_ring->dev,
3809 dma_unmap_addr(tx_buf, dma),
3810 dma_unmap_len(tx_buf, len),
3811 DMA_TO_DEVICE);
3812 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3813 kfree(tx_buf->raw_buf);
 3814
3815 tx_buf->raw_buf = NULL;
3816 tx_buf->tx_flags = 0;
3817 tx_buf->next_to_watch = NULL;
 3818 dma_unmap_len_set(tx_buf, len, 0);
3819 tx_desc->buffer_addr = 0;
3820 tx_desc->cmd_type_offset_bsz = 0;
 3821
 3822 /* move us past the eop_desc for start of next FD desc */
3823 tx_buf++;
3824 tx_desc++;
3825 i++;
3826 if (unlikely(!i)) {
3827 i -= tx_ring->count;
3828 tx_buf = tx_ring->tx_bi;
3829 tx_desc = I40E_TX_DESC(tx_ring, 0);
3830 }
3831
3832 /* update budget accounting */
3833 budget--;
3834 } while (likely(budget));
3835
3836 i += tx_ring->count;
3837 tx_ring->next_to_clean = i;
3838
 3839 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
 3840 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
 3841
3842 return budget > 0;
3843}
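/* Editor's note: the clean loop above biases the running index with
 * "i -= tx_ring->count" so that it stays in [-count, 0); wraparound then
 * needs only an addition instead of a modulo on every step. A minimal
 * stand-alone model of that indexing trick (illustration only):
 */
#if 0
static void demo_ring_walk(int count, int next_to_clean, int steps)
{
	int i = next_to_clean - count;	/* keep i in [-count, 0) */

	while (steps--) {
		i++;
		if (unlikely(!i))
			i -= count;	/* wrapped past the last slot */
		/* the real slot number is always i + count */
	}
}
#endif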
3844
3845/**
3846 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3847 * @irq: interrupt number
3848 * @data: pointer to a q_vector
3849 **/
3850static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3851{
3852 struct i40e_q_vector *q_vector = data;
3853 struct i40e_vsi *vsi;
3854
3855 if (!q_vector->tx.ring)
3856 return IRQ_HANDLED;
3857
3858 vsi = q_vector->tx.ring->vsi;
3859 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3860
3861 return IRQ_HANDLED;
3862}
3863
 3864/**
 3865 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3866 * @vsi: the VSI being configured
3867 * @v_idx: vector index
 3868 * @qp_idx: queue pair index
 3869 **/
 3870static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 3871{
 3872 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3873 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3874 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3875
3876 tx_ring->q_vector = q_vector;
3877 tx_ring->next = q_vector->tx.ring;
3878 q_vector->tx.ring = tx_ring;
 3879 q_vector->tx.count++;
 3880
3881 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
3882 if (i40e_enabled_xdp_vsi(vsi)) {
3883 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
3884
3885 xdp_ring->q_vector = q_vector;
3886 xdp_ring->next = q_vector->tx.ring;
3887 q_vector->tx.ring = xdp_ring;
3888 q_vector->tx.count++;
3889 }
3890
3891 rx_ring->q_vector = q_vector;
3892 rx_ring->next = q_vector->rx.ring;
3893 q_vector->rx.ring = rx_ring;
3894 q_vector->rx.count++;
3895}
3896
3897/**
3898 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3899 * @vsi: the VSI being configured
3900 *
3901 * This function maps descriptor rings to the queue-specific vectors
3902 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3903 * one vector per queue pair, but on a constrained vector budget, we
3904 * group the queue pairs as "efficiently" as possible.
3905 **/
3906static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3907{
3908 int qp_remaining = vsi->num_queue_pairs;
3909 int q_vectors = vsi->num_q_vectors;
 3910 int num_ringpairs;
3911 int v_start = 0;
3912 int qp_idx = 0;
3913
3914 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3915 * group them so there are multiple queues per vector.
3916 * It is also important to go through all the vectors available to be
3917 * sure that if we don't use all the vectors, that the remaining vectors
3918 * are cleared. This is especially important when decreasing the
3919 * number of queues in use.
 3920 */
 3921 for (; v_start < q_vectors; v_start++) {
3922 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3923
3924 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3925
3926 q_vector->num_ringpairs = num_ringpairs;
3927
3928 q_vector->rx.count = 0;
3929 q_vector->tx.count = 0;
3930 q_vector->rx.ring = NULL;
3931 q_vector->tx.ring = NULL;
3932
3933 while (num_ringpairs--) {
 3934 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3935 qp_idx++;
3936 qp_remaining--;
3937 }
3938 }
3939}
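/* Editor's sketch: DIV_ROUND_UP(qp_remaining, q_vectors - v_start) gives
 * the earlier vectors the larger groups, e.g. 10 queue pairs over 4
 * vectors come out as 3, 3, 2, 2. Stand-alone model (illustration only):
 */
#if 0
static void demo_distribute(int qps, int vectors)
{
	int v;

	for (v = 0; v < vectors; v++) {
		int share = DIV_ROUND_UP(qps, vectors - v);

		pr_info("vector %d: %d queue pairs\n", v, share);
		qps -= share;
	}
}
#endif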
3940
3941/**
3942 * i40e_vsi_request_irq - Request IRQ from the OS
3943 * @vsi: the VSI being configured
3944 * @basename: name for the vector
3945 **/
3946static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3947{
3948 struct i40e_pf *pf = vsi->back;
3949 int err;
3950
3951 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3952 err = i40e_vsi_request_irq_msix(vsi, basename);
3953 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3954 err = request_irq(pf->pdev->irq, i40e_intr, 0,
 3955 pf->int_name, pf);
3956 else
3957 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
 3958 pf->int_name, pf);
3959
3960 if (err)
3961 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3962
3963 return err;
3964}
3965
3966#ifdef CONFIG_NET_POLL_CONTROLLER
3967/**
 3968 * i40e_netpoll - A Polling 'interrupt' handler
3969 * @netdev: network interface device structure
3970 *
3971 * This is used by netconsole to send skbs without having to re-enable
3972 * interrupts. It's not called while the normal interrupt routine is executing.
3973 **/
3974static void i40e_netpoll(struct net_device *netdev)
3975{
3976 struct i40e_netdev_priv *np = netdev_priv(netdev);
3977 struct i40e_vsi *vsi = np->vsi;
3978 struct i40e_pf *pf = vsi->back;
3979 int i;
3980
3981 /* if interface is down do nothing */
 3982 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3983 return;
3984
3985 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3986 for (i = 0; i < vsi->num_q_vectors; i++)
 3987 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3988 } else {
3989 i40e_intr(pf->pdev->irq, netdev);
3990 }
3991}
3992#endif
3993
3994#define I40E_QTX_ENA_WAIT_COUNT 50
3995
3996/**
3997 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3998 * @pf: the PF being configured
3999 * @pf_q: the PF queue
4000 * @enable: enable or disable state of the queue
4001 *
4002 * This routine will wait for the given Tx queue of the PF to reach the
4003 * enabled or disabled state.
4004 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4005 * multiple retries; else will return 0 in case of success.
4006 **/
4007static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4008{
4009 int i;
4010 u32 tx_reg;
4011
4012 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4013 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4014 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4015 break;
4016
 4017 usleep_range(10, 20);
4018 }
4019 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4020 return -ETIMEDOUT;
4021
4022 return 0;
4023}
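/* Editor's note: i40e_pf_txq_wait() above and i40e_pf_rxq_wait() below are
 * both instances of the bounded-poll pattern: re-read a status bit a fixed
 * number of times with a short sleep, then give up with -ETIMEDOUT. A
 * generic hypothetical form (illustration only):
 */
#if 0
static int demo_poll_bit(u32 (*read_status)(void), u32 mask, bool want,
			 int retries)
{
	while (retries--) {
		if (!!(read_status() & mask) == want)
			return 0;
		usleep_range(10, 20);
	}
	return -ETIMEDOUT;
}
#endif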
4024
4025/**
4026 * i40e_control_tx_q - Start or stop a particular Tx queue
4027 * @pf: the PF structure
4028 * @pf_q: the PF queue to configure
4029 * @enable: start or stop the queue
4030 *
4031 * This function enables or disables a single queue. Note that any delay
4032 * required after the operation is expected to be handled by the caller of
4033 * this function.
4034 **/
4035static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4036{
4037 struct i40e_hw *hw = &pf->hw;
4038 u32 tx_reg;
4039 int i;
4040
4041 /* warn the TX unit of coming changes */
4042 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4043 if (!enable)
4044 usleep_range(10, 20);
4045
4046 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4047 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4048 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4049 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4050 break;
4051 usleep_range(1000, 2000);
4052 }
4053
4054 /* Skip if the queue is already in the requested state */
4055 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4056 return;
4057
4058 /* turn on/off the queue */
4059 if (enable) {
4060 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4061 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4062 } else {
4063 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4064 }
4065
4066 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4067}
4068
4069/**
4070 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4071 * @seid: VSI SEID
4072 * @pf: the PF structure
4073 * @pf_q: the PF queue to configure
4074 * @is_xdp: true if the queue is used for XDP
4075 * @enable: start or stop the queue
4076 **/
4077static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4078 bool is_xdp, bool enable)
4079{
4080 int ret;
4081
4082 i40e_control_tx_q(pf, pf_q, enable);
4083
4084 /* wait for the change to finish */
4085 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4086 if (ret) {
4087 dev_info(&pf->pdev->dev,
4088 "VSI seid %d %sTx ring %d %sable timeout\n",
4089 seid, (is_xdp ? "XDP " : ""), pf_q,
4090 (enable ? "en" : "dis"));
4091 }
4092
4093 return ret;
4094}
4095
4096/**
4097 * i40e_vsi_control_tx - Start or stop a VSI's rings
4098 * @vsi: the VSI being configured
4099 * @enable: start or stop the rings
4100 **/
4101static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4102{
4103 struct i40e_pf *pf = vsi->back;
 4104 int i, pf_q, ret = 0;
4105
4106 pf_q = vsi->base_queue;
4107 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4108 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4109 pf_q,
4110 false /*is xdp*/, enable);
4111 if (ret)
4112 break;
 4113
4114 if (!i40e_enabled_xdp_vsi(vsi))
4115 continue;
4116
4117 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4118 pf_q + vsi->alloc_queue_pairs,
4119 true /*is xdp*/, enable);
4120 if (ret)
 4121 break;
4122 }
4123
4124 return ret;
4125}
4126
4127/**
4128 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4129 * @pf: the PF being configured
4130 * @pf_q: the PF queue
4131 * @enable: enable or disable state of the queue
4132 *
4133 * This routine will wait for the given Rx queue of the PF to reach the
4134 * enabled or disabled state.
4135 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4136 * multiple retries; else will return 0 in case of success.
4137 **/
4138static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4139{
4140 int i;
4141 u32 rx_reg;
4142
4143 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4144 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4145 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4146 break;
4147
 4148 usleep_range(10, 20);
4149 }
4150 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4151 return -ETIMEDOUT;
 4152
4153 return 0;
4154}
4155
4156/**
4157 * i40e_control_rx_q - Start or stop a particular Rx queue
4158 * @pf: the PF structure
4159 * @pf_q: the PF queue to configure
4160 * @enable: start or stop the queue
4161 *
4162 * This function enables or disables a single queue. Note that any delay
4163 * required after the operation is expected to be handled by the caller of
4164 * this function.
4165 **/
4166static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4167{
4168 struct i40e_hw *hw = &pf->hw;
4169 u32 rx_reg;
4170 int i;
4171
4172 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4173 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4174 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4175 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4176 break;
4177 usleep_range(1000, 2000);
4178 }
4179
4180 /* Skip if the queue is already in the requested state */
4181 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4182 return;
4183
4184 /* turn on/off the queue */
4185 if (enable)
4186 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4187 else
4188 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4189
4190 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4191}
4192
4193/**
4194 * i40e_vsi_control_rx - Start or stop a VSI's rings
4195 * @vsi: the VSI being configured
4196 * @enable: start or stop the rings
4197 **/
4198static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4199{
4200 struct i40e_pf *pf = vsi->back;
 4201 int i, pf_q, ret = 0;
4202
4203 pf_q = vsi->base_queue;
4204 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
 4205 i40e_control_rx_q(pf, pf_q, enable);
 4206
 4207 /* wait for the change to finish */
4208 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4209 if (ret) {
4210 dev_info(&pf->pdev->dev,
4211 "VSI seid %d Rx ring %d %sable timeout\n",
4212 vsi->seid, pf_q, (enable ? "en" : "dis"));
 4213 break;
4214 }
4215 }
4216
 4217 /* Due to HW errata, on Rx disable only, the register can indicate done
 4218 * before it really is; wait 50 ms to be sure
4219 */
4220 if (!enable)
4221 mdelay(50);
4222
 4223 return ret;
4224}
4225
4226/**
 4227 * i40e_vsi_start_rings - Start a VSI's rings
 4228 * @vsi: the VSI being configured
 4229 **/
 4230int i40e_vsi_start_rings(struct i40e_vsi *vsi)
 4231{
 4232 int ret = 0;
4233
4234 /* do rx first for enable and last for disable */
4235 ret = i40e_vsi_control_rx(vsi, true);
4236 if (ret)
4237 return ret;
4238 ret = i40e_vsi_control_tx(vsi, true);
4239
4240 return ret;
4241}
4242
4243/**
4244 * i40e_vsi_stop_rings - Stop a VSI's rings
4245 * @vsi: the VSI being configured
4246 **/
4247void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4248{
 4249 /* When port TX is suspended, don't wait */
 4250 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4251 return i40e_vsi_stop_rings_no_wait(vsi);
4252
4253 /* do rx first for enable and last for disable
4254 * Ignore return value, we need to shutdown whatever we can
4255 */
4256 i40e_vsi_control_tx(vsi, false);
4257 i40e_vsi_control_rx(vsi, false);
4258}
4259
4260/**
4261 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4262 * @vsi: the VSI being shutdown
4263 *
4264 * This function stops all the rings for a VSI but does not delay to verify
4265 * that rings have been disabled. It is expected that the caller is shutting
 4266 * down multiple VSIs at once and will wait once for all the VSIs after
 4267 * initiating the shutdown. This is particularly useful for shutting down lots
 4268 * of VFs together. Otherwise, a large delay is incurred while shutting down
 4269 * each VSI serially.
4270 **/
4271void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4272{
4273 struct i40e_pf *pf = vsi->back;
4274 int i, pf_q;
4275
4276 pf_q = vsi->base_queue;
4277 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4278 i40e_control_tx_q(pf, pf_q, false);
4279 i40e_control_rx_q(pf, pf_q, false);
4280 }
4281}
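/* Editor's sketch: the no-wait variant enables the "stop everything, then
 * settle once" batching described above. A hypothetical caller shutting
 * down many VSIs might do (illustration only):
 */
#if 0
static void demo_stop_many(struct i40e_vsi **vsis, int n)
{
	int i;

	for (i = 0; i < n; i++)
		i40e_vsi_stop_rings_no_wait(vsis[i]);	/* kick them all */

	mdelay(50);	/* one shared settle delay instead of n of them */

	for (i = 0; i < n; i++)
		i40e_vsi_wait_queues_disabled(vsis[i]);	/* then verify */
}
#endif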
4282
4283/**
4284 * i40e_vsi_free_irq - Free the irq association with the OS
4285 * @vsi: the VSI being configured
4286 **/
4287static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4288{
4289 struct i40e_pf *pf = vsi->back;
4290 struct i40e_hw *hw = &pf->hw;
4291 int base = vsi->base_vector;
4292 u32 val, qp;
4293 int i;
4294
4295 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4296 if (!vsi->q_vectors)
4297 return;
4298
4299 if (!vsi->irqs_ready)
4300 return;
4301
4302 vsi->irqs_ready = false;
 4303 for (i = 0; i < vsi->num_q_vectors; i++) {
4304 int irq_num;
4305 u16 vector;
4306
4307 vector = i + base;
4308 irq_num = pf->msix_entries[vector].vector;
4309
4310 /* free only the irqs that were actually requested */
4311 if (!vsi->q_vectors[i] ||
4312 !vsi->q_vectors[i]->num_ringpairs)
4313 continue;
4314
4315 /* clear the affinity notifier in the IRQ descriptor */
4316 irq_set_affinity_notifier(irq_num, NULL);
 4317 /* remove our suggested affinity mask for this IRQ */
4318 irq_set_affinity_hint(irq_num, NULL);
4319 synchronize_irq(irq_num);
4320 free_irq(irq_num, vsi->q_vectors[i]);
4321
4322 /* Tear down the interrupt queue link list
4323 *
4324 * We know that they come in pairs and always
4325 * the Rx first, then the Tx. To clear the
4326 * link list, stick the EOL value into the
4327 * next_q field of the registers.
4328 */
4329 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4330 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4331 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4332 val |= I40E_QUEUE_END_OF_LIST
4333 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4334 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4335
4336 while (qp != I40E_QUEUE_END_OF_LIST) {
4337 u32 next;
4338
4339 val = rd32(hw, I40E_QINT_RQCTL(qp));
4340
4341 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4342 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4343 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4344 I40E_QINT_RQCTL_INTEVENT_MASK);
4345
4346 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4347 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4348
4349 wr32(hw, I40E_QINT_RQCTL(qp), val);
4350
4351 val = rd32(hw, I40E_QINT_TQCTL(qp));
4352
4353 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4354 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4355
4356 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4357 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4358 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4359 I40E_QINT_TQCTL_INTEVENT_MASK);
4360
4361 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4362 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4363
4364 wr32(hw, I40E_QINT_TQCTL(qp), val);
4365 qp = next;
4366 }
4367 }
4368 } else {
4369 free_irq(pf->pdev->irq, pf);
4370
4371 val = rd32(hw, I40E_PFINT_LNKLST0);
4372 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4373 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4374 val |= I40E_QUEUE_END_OF_LIST
4375 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4376 wr32(hw, I40E_PFINT_LNKLST0, val);
4377
4378 val = rd32(hw, I40E_QINT_RQCTL(qp));
4379 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4380 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4381 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4382 I40E_QINT_RQCTL_INTEVENT_MASK);
4383
4384 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4385 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4386
4387 wr32(hw, I40E_QINT_RQCTL(qp), val);
4388
4389 val = rd32(hw, I40E_QINT_TQCTL(qp));
4390
4391 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4392 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4393 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4394 I40E_QINT_TQCTL_INTEVENT_MASK);
4395
4396 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4397 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4398
4399 wr32(hw, I40E_QINT_TQCTL(qp), val);
4400 }
4401}
4402
4403/**
4404 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4405 * @vsi: the VSI being configured
4406 * @v_idx: Index of vector to be freed
4407 *
4408 * This function frees the memory allocated to the q_vector. In addition if
4409 * NAPI is enabled it will delete any references to the NAPI struct prior
4410 * to freeing the q_vector.
4411 **/
4412static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4413{
4414 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
 4415 struct i40e_ring *ring;
4416
4417 if (!q_vector)
4418 return;
4419
4420 /* disassociate q_vector from rings */
4421 i40e_for_each_ring(ring, q_vector->tx)
4422 ring->q_vector = NULL;
4423
4424 i40e_for_each_ring(ring, q_vector->rx)
4425 ring->q_vector = NULL;
4426
4427 /* only VSI w/ an associated netdev is set up w/ NAPI */
4428 if (vsi->netdev)
4429 netif_napi_del(&q_vector->napi);
4430
4431 vsi->q_vectors[v_idx] = NULL;
4432
4433 kfree_rcu(q_vector, rcu);
4434}
4435
4436/**
4437 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4438 * @vsi: the VSI being un-configured
4439 *
4440 * This frees the memory allocated to the q_vectors and
4441 * deletes references to the NAPI struct.
4442 **/
4443static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4444{
4445 int v_idx;
4446
4447 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4448 i40e_free_q_vector(vsi, v_idx);
4449}
4450
4451/**
4452 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4453 * @pf: board private structure
4454 **/
4455static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4456{
4457 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4458 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4459 pci_disable_msix(pf->pdev);
4460 kfree(pf->msix_entries);
4461 pf->msix_entries = NULL;
4462 kfree(pf->irq_pile);
4463 pf->irq_pile = NULL;
4464 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4465 pci_disable_msi(pf->pdev);
4466 }
4467 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4468}
4469
4470/**
4471 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4472 * @pf: board private structure
4473 *
4474 * We go through and clear interrupt specific resources and reset the structure
4475 * to pre-load conditions
4476 **/
4477static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4478{
4479 int i;
4480
 4481 i40e_free_misc_vector(pf);
 4482
4483 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4484 I40E_IWARP_IRQ_PILE_ID);
4485
 4486 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
 4487 for (i = 0; i < pf->num_alloc_vsi; i++)
4488 if (pf->vsi[i])
4489 i40e_vsi_free_q_vectors(pf->vsi[i]);
4490 i40e_reset_interrupt_capability(pf);
4491}
4492
4493/**
4494 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4495 * @vsi: the VSI being configured
4496 **/
4497static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4498{
4499 int q_idx;
4500
4501 if (!vsi->netdev)
4502 return;
4503
4504 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4505 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4506
4507 if (q_vector->rx.ring || q_vector->tx.ring)
4508 napi_enable(&q_vector->napi);
4509 }
4510}
4511
4512/**
4513 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4514 * @vsi: the VSI being configured
4515 **/
4516static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4517{
4518 int q_idx;
4519
4520 if (!vsi->netdev)
4521 return;
4522
4523 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4524 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4525
4526 if (q_vector->rx.ring || q_vector->tx.ring)
4527 napi_disable(&q_vector->napi);
4528 }
4529}
4530
4531/**
4532 * i40e_vsi_close - Shut down a VSI
4533 * @vsi: the vsi to be quelled
4534 **/
4535static void i40e_vsi_close(struct i40e_vsi *vsi)
4536{
 4537 struct i40e_pf *pf = vsi->back;
 4538 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4539 i40e_down(vsi);
4540 i40e_vsi_free_irq(vsi);
4541 i40e_vsi_free_tx_resources(vsi);
4542 i40e_vsi_free_rx_resources(vsi);
 4543 vsi->current_netdev_flags = 0;
 4544 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
 4545 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 4546 pf->flags |= I40E_FLAG_CLIENT_RESET;
4547}
4548
4549/**
4550 * i40e_quiesce_vsi - Pause a given VSI
4551 * @vsi: the VSI being paused
4552 **/
4553static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4554{
 4555 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4556 return;
4557
 4558 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
 4559 if (vsi->netdev && netif_running(vsi->netdev))
 4560 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
 4561 else
 4562 i40e_vsi_close(vsi);
4563}
4564
4565/**
4566 * i40e_unquiesce_vsi - Resume a given VSI
4567 * @vsi: the VSI being resumed
4568 **/
4569static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4570{
 4571 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4572 return;
4573
4574 if (vsi->netdev && netif_running(vsi->netdev))
4575 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4576 else
 4577 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4578}
4579
4580/**
4581 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4582 * @pf: the PF
4583 **/
4584static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4585{
4586 int v;
4587
 4588 for (v = 0; v < pf->num_alloc_vsi; v++) {
4589 if (pf->vsi[v])
4590 i40e_quiesce_vsi(pf->vsi[v]);
4591 }
4592}
4593
4594/**
4595 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4596 * @pf: the PF
4597 **/
4598static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4599{
4600 int v;
4601
 4602 for (v = 0; v < pf->num_alloc_vsi; v++) {
4603 if (pf->vsi[v])
4604 i40e_unquiesce_vsi(pf->vsi[v]);
4605 }
4606}
4607
 4608/**
 4609 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4610 * @vsi: the VSI being configured
4611 *
 4612 * Wait until all queues on a given VSI have been disabled.
 4613 **/
 4614int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4615{
4616 struct i40e_pf *pf = vsi->back;
4617 int i, pf_q, ret;
4618
4619 pf_q = vsi->base_queue;
4620 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
 4621 /* Check and wait for the Tx queue */
4622 ret = i40e_pf_txq_wait(pf, pf_q, false);
4623 if (ret) {
4624 dev_info(&pf->pdev->dev,
4625 "VSI seid %d Tx ring %d disable timeout\n",
4626 vsi->seid, pf_q);
4627 return ret;
4628 }
4629
4630 if (!i40e_enabled_xdp_vsi(vsi))
4631 goto wait_rx;
4632
4633 /* Check and wait for the XDP Tx queue */
4634 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4635 false);
4636 if (ret) {
4637 dev_info(&pf->pdev->dev,
4638 "VSI seid %d XDP Tx ring %d disable timeout\n",
4639 vsi->seid, pf_q);
4640 return ret;
4641 }
4642wait_rx:
4643 /* Check and wait for the Rx queue */
4644 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4645 if (ret) {
4646 dev_info(&pf->pdev->dev,
4647 "VSI seid %d Rx ring %d disable timeout\n",
4648 vsi->seid, pf_q);
4649 return ret;
4650 }
4651 }
4652
4653 return 0;
4654}
4655
 4656#ifdef CONFIG_I40E_DCB
 4657/**
 4658 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4659 * @pf: the PF
4660 *
 4661 * This function waits for the queues to be in disabled state for all the
4662 * VSIs that are managed by this PF.
4663 **/
 4664static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4665{
4666 int v, ret = 0;
4667
4668 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
 4669 if (pf->vsi[v]) {
 4670 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4671 if (ret)
4672 break;
4673 }
4674 }
4675
4676 return ret;
4677}
4678
4679#endif
4680
4681/**
4682 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4683 * @q_idx: TX queue number
4684 * @vsi: Pointer to VSI struct
4685 *
 4686 * This function checks the specified queue of the given VSI for a hung condition.
4687 * We proactively detect hung TX queues by checking if interrupts are disabled
4688 * but there are pending descriptors. If it appears hung, attempt to recover
4689 * by triggering a SW interrupt.
4690 **/
4691static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4692{
4693 struct i40e_ring *tx_ring = NULL;
4694 struct i40e_pf *pf;
 4695 u32 val, tx_pending;
4696 int i;
4697
4698 pf = vsi->back;
4699
4700 /* now that we have an index, find the tx_ring struct */
4701 for (i = 0; i < vsi->num_queue_pairs; i++) {
4702 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4703 if (q_idx == vsi->tx_rings[i]->queue_index) {
4704 tx_ring = vsi->tx_rings[i];
4705 break;
4706 }
4707 }
4708 }
4709
4710 if (!tx_ring)
4711 return;
4712
4713 /* Read interrupt register */
4714 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4715 val = rd32(&pf->hw,
4716 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4717 tx_ring->vsi->base_vector - 1));
4718 else
4719 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4720
 4721 tx_pending = i40e_get_tx_pending(tx_ring);
 4722
 4723 /* If interrupts are disabled and TX is still pending,
 4724 * trigger the SW interrupt (don't wait). Worst case
 4725 * there will be one extra interrupt, which may result
 4726 * in no queues being cleaned because they are already clean.
 4727 */
4728 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4729 i40e_force_wb(vsi, tx_ring->q_vector);
4730}
4731
4732/**
4733 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4734 * @pf: pointer to PF struct
4735 *
 4736 * The LAN VSI has a netdev, and the netdev has TX queues. This function
 4737 * checks each of those TX queues for a hung condition and triggers
 4738 * recovery by issuing a SW interrupt.
4739 **/
4740static void i40e_detect_recover_hung(struct i40e_pf *pf)
4741{
4742 struct net_device *netdev;
4743 struct i40e_vsi *vsi;
 4744 unsigned int i;
4745
4746 /* Only for LAN VSI */
4747 vsi = pf->vsi[pf->lan_vsi];
4748
4749 if (!vsi)
4750 return;
4751
4752 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4753 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4754 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4755 return;
4756
4757 /* Make sure type is MAIN VSI */
4758 if (vsi->type != I40E_VSI_MAIN)
4759 return;
4760
4761 netdev = vsi->netdev;
4762 if (!netdev)
4763 return;
4764
4765 /* Bail out if netif_carrier is not OK */
4766 if (!netif_carrier_ok(netdev))
4767 return;
4768
4769 /* Go thru' TX queues for netdev */
4770 for (i = 0; i < netdev->num_tx_queues; i++) {
4771 struct netdev_queue *q;
4772
4773 q = netdev_get_tx_queue(netdev, i);
4774 if (q)
4775 i40e_detect_recover_hung_queue(i, vsi);
4776 }
4777}
4778
4779/**
4780 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 4781 * @pf: pointer to PF
4782 *
4783 * Get TC map for ISCSI PF type that will include iSCSI TC
4784 * and LAN TC.
4785 **/
4786static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4787{
4788 struct i40e_dcb_app_priority_table app;
4789 struct i40e_hw *hw = &pf->hw;
4790 u8 enabled_tc = 1; /* TC0 is always enabled */
4791 u8 tc, i;
4792 /* Get the iSCSI APP TLV */
4793 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4794
4795 for (i = 0; i < dcbcfg->numapps; i++) {
4796 app = dcbcfg->app[i];
4797 if (app.selector == I40E_APP_SEL_TCPIP &&
4798 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4799 tc = dcbcfg->etscfg.prioritytable[app.priority];
 4800 enabled_tc |= BIT(tc);
4801 break;
4802 }
4803 }
4804
4805 return enabled_tc;
4806}
4807
4808/**
4809 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4810 * @dcbcfg: the corresponding DCBx configuration structure
4811 *
4812 * Return the number of TCs from given DCBx configuration
4813 **/
4814static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4815{
 4816 int i, tc_unused = 0;
 4817 u8 num_tc = 0;
 4818 u8 ret = 0;
4819
4820 /* Scan the ETS Config Priority Table to find
4821 * traffic class enabled for a given priority
 4822 * and create a bitmask of enabled TCs
 4823 */
4824 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4825 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
 4826
4827 /* Now scan the bitmask to check for
4828 * contiguous TCs starting with TC0
 4829 */
4830 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4831 if (num_tc & BIT(i)) {
4832 if (!tc_unused) {
4833 ret++;
4834 } else {
4835 pr_err("Non-contiguous TC - Disabling DCB\n");
4836 return 1;
4837 }
4838 } else {
4839 tc_unused = 1;
4840 }
4841 }
4842
4843 /* There is always at least TC0 */
4844 if (!ret)
4845 ret = 1;
4846
4847 return ret;
4848}
4849
4850/**
4851 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4852 * @dcbcfg: the corresponding DCBx configuration structure
4853 *
4854 * Query the current DCB configuration and return the number of
4855 * traffic classes enabled from the given DCBX config
4856 **/
4857static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4858{
4859 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4860 u8 enabled_tc = 1;
4861 u8 i;
4862
4863 for (i = 0; i < num_tc; i++)
 4864 enabled_tc |= BIT(i);
4865
4866 return enabled_tc;
4867}
4868
4869/**
4870 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4871 * @pf: PF being queried
4872 *
4873 * Return number of traffic classes enabled for the given PF
4874 **/
4875static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4876{
4877 struct i40e_hw *hw = &pf->hw;
 4878 u8 i, enabled_tc = 1;
4879 u8 num_tc = 0;
4880 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4881
4882 /* If DCB is not enabled then always in single TC */
4883 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4884 return 1;
4885
4886 /* SFP mode will be enabled for all TCs on port */
4887 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4888 return i40e_dcb_get_num_tc(dcbcfg);
4889
 4890 /* MFP mode return count of enabled TCs for this PF */
4891 if (pf->hw.func_caps.iscsi)
4892 enabled_tc = i40e_get_iscsi_tc_map(pf);
4893 else
 4894 return 1; /* Only TC0 */
 4895
 4896 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 4897 if (enabled_tc & BIT(i))
4898 num_tc++;
4899 }
4900 return num_tc;
4901}
4902
4903/**
4904 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4905 * @pf: PF being queried
4906 *
4907 * Return a bitmap for enabled traffic classes for this PF.
4908 **/
4909static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4910{
4911 /* If DCB is not enabled for this PF then just return default TC */
4912 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
 4913 return I40E_DEFAULT_TRAFFIC_CLASS;
 4914
 4915 /* SFP mode we want PF to be enabled for all TCs */
4916 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4917 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4918
 4919 /* MFP enabled and iSCSI PF type */
4920 if (pf->hw.func_caps.iscsi)
4921 return i40e_get_iscsi_tc_map(pf);
4922 else
 4923 return I40E_DEFAULT_TRAFFIC_CLASS;
4924}
4925
4926/**
4927 * i40e_vsi_get_bw_info - Query VSI BW Information
4928 * @vsi: the VSI being queried
4929 *
4930 * Returns 0 on success, negative value on failure
4931 **/
4932static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4933{
4934 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4935 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4936 struct i40e_pf *pf = vsi->back;
4937 struct i40e_hw *hw = &pf->hw;
 4938 i40e_status ret;
 4939 u32 tc_bw_max;
4940 int i;
4941
4942 /* Get the VSI level BW configuration */
4943 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4944 if (ret) {
 4945 dev_info(&pf->pdev->dev,
4946 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4947 i40e_stat_str(&pf->hw, ret),
4948 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 4949 return -EINVAL;
4950 }
4951
4952 /* Get the VSI level BW configuration per TC */
4953 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4954 NULL);
4955 if (ret) {
 4956 dev_info(&pf->pdev->dev,
4957 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4958 i40e_stat_str(&pf->hw, ret),
4959 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 4960 return -EINVAL;
4961 }
4962
4963 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4964 dev_info(&pf->pdev->dev,
4965 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4966 bw_config.tc_valid_bits,
4967 bw_ets_config.tc_valid_bits);
4968 /* Still continuing */
4969 }
4970
4971 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4972 vsi->bw_max_quanta = bw_config.max_bw;
4973 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4974 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4975 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4976 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4977 vsi->bw_ets_limit_credits[i] =
4978 le16_to_cpu(bw_ets_config.credits[i]);
4979 /* 3 bits out of 4 for each TC */
4980 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4981 }
078b5876 4982
dcae29be 4983 return 0;
4984}
4985
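The tc_bw_max unpacking above deserves a worked example: two little-endian 16-bit words are stitched into one 32-bit value, and each TC then owns a 4-bit nibble of which only the low 3 bits are a valid max-quanta value. A stand-alone sketch (user-space C; the sample words are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* the two 16-bit words of tc_bw_max[], assumed already
	 * converted from little endian as le16_to_cpu() does */
	uint16_t lo = 0x4321, hi = 0x8765;
	uint32_t tc_bw_max = lo | ((uint32_t)hi << 16);
	int i;

	/* 3 bits out of each TC's 4-bit nibble, as in the loop above */
	for (i = 0; i < 8; i++)
		printf("TC%d max quanta: %u\n", i,
		       (unsigned)((tc_bw_max >> (i * 4)) & 0x7));
	return 0;
}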
4986/**
4987 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4988 * @vsi: the VSI being configured
4989 * @enabled_tc: TC bitmap
4990 * @bw_credits: BW shared credits per TC
4991 *
4992 * Returns 0 on success, negative value on failure
4993 **/
dcae29be 4994static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4995 u8 *bw_share)
4996{
4997 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
f1c7e72e 4998 i40e_status ret;
dcae29be 4999 int i;
5000
5001 bw_data.tc_valid_bits = enabled_tc;
5002 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5003 bw_data.tc_bw_credits[i] = bw_share[i];
5004
5005 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
5006 NULL);
5007 if (ret) {
41c445ff 5008 dev_info(&vsi->back->pdev->dev,
5009 "AQ command Config VSI BW allocation per TC failed = %d\n",
5010 vsi->back->hw.aq.asq_last_status);
dcae29be 5011 return -EINVAL;
5012 }
5013
5014 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5015 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5016
dcae29be 5017 return 0;
5018}
5019
5020/**
5021 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5022 * @vsi: the VSI being configured
5023 * @enabled_tc: TC map to be enabled
5024 *
5025 **/
5026static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5027{
5028 struct net_device *netdev = vsi->netdev;
5029 struct i40e_pf *pf = vsi->back;
5030 struct i40e_hw *hw = &pf->hw;
5031 u8 netdev_tc = 0;
5032 int i;
5033 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5034
5035 if (!netdev)
5036 return;
5037
5038 if (!enabled_tc) {
5039 netdev_reset_tc(netdev);
5040 return;
5041 }
5042
5043 /* Set up actual enabled TCs on the VSI */
5044 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5045 return;
5046
5047 /* set per TC queues for the VSI */
5048 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5049 /* Only set TC queues for enabled tcs
5050 *
5051 * e.g. For a VSI that has TC0 and TC3 enabled the
5052 * enabled_tc bitmap would be 0x00001001; the driver
5053 * will set the numtc for netdev as 2 that will be
5054 * referenced by the netdev layer as TC 0 and 1.
5055 */
75f5cea9 5056 if (vsi->tc_config.enabled_tc & BIT(i))
5057 netdev_set_tc_queue(netdev,
5058 vsi->tc_config.tc_info[i].netdev_tc,
5059 vsi->tc_config.tc_info[i].qcount,
5060 vsi->tc_config.tc_info[i].qoffset);
5061 }
5062
5063 /* Assign UP2TC map for the VSI */
5064 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5065 /* Get the actual TC# for the UP */
5066 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5067 /* Get the mapped netdev TC# for the UP */
5068 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5069 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5070 }
5071}
5072
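The two-level mapping this function programs (user priority to hardware TC via the DCB priority table, then hardware TC to netdev TC) can be sketched in isolation as below (user-space C; the table contents are hypothetical, chosen to match the TC0/TC3 example in the comment above):

#include <stdint.h>
#include <stdio.h>

#define MAX_USER_PRIORITY 8

int main(void)
{
	/* DCB ETS table: user priority -> hardware TC (made-up values) */
	uint8_t prioritytable[MAX_USER_PRIORITY] = { 0, 0, 0, 3, 3, 0, 0, 0 };
	/* hardware TC -> netdev TC: with only TC0 and TC3 enabled,
	 * the driver would compact them to netdev TCs 0 and 1 */
	uint8_t netdev_tc[8] = { 0, 0, 0, 1, 0, 0, 0, 0 };
	int up;

	for (up = 0; up < MAX_USER_PRIORITY; up++)
		printf("UP %d -> netdev TC %u\n", up,
		       (unsigned)netdev_tc[prioritytable[up]]);
	return 0;
}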
5073/**
5074 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5075 * @vsi: the VSI being configured
5076 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5077 **/
5078static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5079 struct i40e_vsi_context *ctxt)
5080{
5081 /* copy just the sections touched not the entire info
5082 * since not all sections are valid as returned by
5083 * update vsi params
5084 */
5085 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5086 memcpy(&vsi->info.queue_mapping,
5087 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5088 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5089 sizeof(vsi->info.tc_mapping));
5090}
5091
5092/**
5093 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5094 * @vsi: VSI to be configured
5095 * @enabled_tc: TC bitmap
5096 *
5097 * This configures a particular VSI for TCs that are mapped to the
5098 * given TC bitmap. It uses default bandwidth share for TCs across
5099 * VSIs to configure TC for a particular VSI.
5100 *
5101 * NOTE:
5102 * It is expected that the VSI queues have been quiesced before calling
5103 * this function.
5104 **/
5105static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5106{
5107 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5108 struct i40e_vsi_context ctxt;
5109 int ret = 0;
5110 int i;
5111
5112 /* Check if enabled_tc is same as existing or new TCs */
5113 if (vsi->tc_config.enabled_tc == enabled_tc)
5114 return ret;
5115
5116 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5117 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
75f5cea9 5118 if (enabled_tc & BIT(i))
5119 bw_share[i] = 1;
5120 }
5121
5122 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5123 if (ret) {
5124 dev_info(&vsi->back->pdev->dev,
5125 "Failed configuring TC map %d for VSI %d\n",
5126 enabled_tc, vsi->seid);
5127 goto out;
5128 }
5129
5130 /* Update Queue Pairs Mapping for currently enabled UPs */
5131 ctxt.seid = vsi->seid;
5132 ctxt.pf_num = vsi->back->hw.pf_id;
5133 ctxt.vf_num = 0;
5134 ctxt.uplink_seid = vsi->uplink_seid;
1a2f6248 5135 ctxt.info = vsi->info;
5136 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5137
5138 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5139 ctxt.info.valid_sections |=
5140 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5141 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5142 }
5143
5144 /* Update the VSI after updating the VSI queue-mapping information */
5145 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5146 if (ret) {
5147 dev_info(&vsi->back->pdev->dev,
5148 "Update vsi tc config failed, err %s aq_err %s\n",
5149 i40e_stat_str(&vsi->back->hw, ret),
5150 i40e_aq_str(&vsi->back->hw,
5151 vsi->back->hw.aq.asq_last_status));
5152 goto out;
5153 }
5154 /* update the local VSI info with updated queue map */
5155 i40e_vsi_update_queue_map(vsi, &ctxt);
5156 vsi->info.valid_sections = 0;
5157
5158 /* Update current VSI BW information */
5159 ret = i40e_vsi_get_bw_info(vsi);
5160 if (ret) {
5161 dev_info(&vsi->back->pdev->dev,
5162 "Failed updating vsi bw info, err %s aq_err %s\n",
5163 i40e_stat_str(&vsi->back->hw, ret),
5164 i40e_aq_str(&vsi->back->hw,
5165 vsi->back->hw.aq.asq_last_status));
5166 goto out;
5167 }
5168
5169 /* Update the netdev TC setup */
5170 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5171out:
5172 return ret;
5173}
5174
5175/**
5176 * i40e_veb_config_tc - Configure TCs for given VEB
5177 * @veb: given VEB
5178 * @enabled_tc: TC bitmap
5179 *
5180 * Configures given TC bitmap for VEB (switching) element
5181 **/
5182int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
5183{
5184 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
5185 struct i40e_pf *pf = veb->pf;
5186 int ret = 0;
5187 int i;
5188
5189 /* No TCs or already enabled TCs just return */
5190 if (!enabled_tc || veb->enabled_tc == enabled_tc)
5191 return ret;
5192
5193 bw_data.tc_valid_bits = enabled_tc;
5194 /* bw_data.absolute_credits is not set (relative) */
5195
5196 /* Enable ETS TCs with equal BW Share for now */
5197 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
75f5cea9 5198 if (enabled_tc & BIT(i))
5199 bw_data.tc_bw_share_credits[i] = 1;
5200 }
5201
5202 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
5203 &bw_data, NULL);
5204 if (ret) {
5205 dev_info(&pf->pdev->dev,
5206 "VEB bw config failed, err %s aq_err %s\n",
5207 i40e_stat_str(&pf->hw, ret),
5208 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5209 goto out;
5210 }
5211
5212 /* Update the BW information */
5213 ret = i40e_veb_get_bw_info(veb);
5214 if (ret) {
5215 dev_info(&pf->pdev->dev,
5216 "Failed getting veb bw config, err %s aq_err %s\n",
5217 i40e_stat_str(&pf->hw, ret),
5218 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5219 }
5220
5221out:
5222 return ret;
5223}
5224
5225#ifdef CONFIG_I40E_DCB
5226/**
5227 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
5228 * @pf: PF struct
5229 *
5230 * Reconfigure VEB/VSIs on a given PF; it is assumed that
5231 * the caller has quiesced all the VSIs before calling
5232 * this function
5233 **/
5234static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5235{
5236 u8 tc_map = 0;
5237 int ret;
5238 u8 v;
5239
5240 /* Enable the TCs available on PF to all VEBs */
5241 tc_map = i40e_pf_get_tc_map(pf);
5242 for (v = 0; v < I40E_MAX_VEB; v++) {
5243 if (!pf->veb[v])
5244 continue;
5245 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5246 if (ret) {
5247 dev_info(&pf->pdev->dev,
5248 "Failed configuring TC for VEB seid=%d\n",
5249 pf->veb[v]->seid);
5250 /* Will try to configure as many components as possible */
5251 }
5252 }
5253
5254 /* Update each VSI */
505682cd 5255 for (v = 0; v < pf->num_alloc_vsi; v++) {
5256 if (!pf->vsi[v])
5257 continue;
5258
5259 /* - Enable all TCs for the LAN VSI
5260 * - For all others keep them at TC0 for now
5261 */
5262 if (v == pf->lan_vsi)
5263 tc_map = i40e_pf_get_tc_map(pf);
5264 else
ea6acb7e 5265 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5266
5267 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5268 if (ret) {
5269 dev_info(&pf->pdev->dev,
5270 "Failed configuring TC for VSI seid=%d\n",
5271 pf->vsi[v]->seid);
5272 /* Will try to configure as many components as possible */
5273 } else {
5274 /* Re-configure VSI vectors based on updated TC map */
5275 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5276 if (pf->vsi[v]->netdev)
5277 i40e_dcbnl_set_all(pf->vsi[v]);
5278 }
5279 }
5280}
5281
5282/**
5283 * i40e_resume_port_tx - Resume port Tx
5284 * @pf: PF struct
5285 *
5286 * Resume a port's Tx and issue a PF reset in case of failure to
5287 * resume.
5288 **/
5289static int i40e_resume_port_tx(struct i40e_pf *pf)
5290{
5291 struct i40e_hw *hw = &pf->hw;
5292 int ret;
5293
5294 ret = i40e_aq_resume_port_tx(hw, NULL);
5295 if (ret) {
5296 dev_info(&pf->pdev->dev,
5297 "Resume Port Tx failed, err %s aq_err %s\n",
5298 i40e_stat_str(&pf->hw, ret),
5299 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
2fd75f31 5300 /* Schedule PF reset to recover */
0da36b97 5301 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
5302 i40e_service_event_schedule(pf);
5303 }
5304
5305 return ret;
5306}
5307
5308/**
5309 * i40e_init_pf_dcb - Initialize DCB configuration
5310 * @pf: PF being configured
5311 *
5312 * Query the current DCB configuration and cache it
5313 * in the hardware structure
5314 **/
5315static int i40e_init_pf_dcb(struct i40e_pf *pf)
5316{
5317 struct i40e_hw *hw = &pf->hw;
5318 int err = 0;
5319
025b4a54 5320 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
d36e41dc 5321 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
5322 goto out;
5323
5324 /* Get the initial DCB configuration */
5325 err = i40e_init_dcb(hw);
5326 if (!err) {
5327 /* Device/Function is not DCBX capable */
5328 if ((!hw->func_caps.dcb) ||
5329 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5330 dev_info(&pf->pdev->dev,
5331 "DCBX offload is not supported or is disabled for this PF.\n");
5332 } else {
5333 /* When status is not DISABLED then DCBX in FW */
5334 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5335 DCB_CAP_DCBX_VER_IEEE;
5336
5337 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5338 /* Enable DCB tagging only when more than one TC
5339 * or explicitly disable if only one TC
5340 */
5341 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5342 pf->flags |= I40E_FLAG_DCB_ENABLED;
5343 else
5344 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5345 dev_dbg(&pf->pdev->dev,
5346 "DCBX offload is supported for this PF.\n");
4e3b35b0 5347 }
014269ff 5348 } else {
aebfc816 5349 dev_info(&pf->pdev->dev,
5350 "Query for DCB configuration failed, err %s aq_err %s\n",
5351 i40e_stat_str(&pf->hw, err),
5352 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5353 }
5354
5355out:
5356 return err;
5357}
5358#endif /* CONFIG_I40E_DCB */
5359#define SPEED_SIZE 14
5360#define FC_SIZE 8
5361/**
5362 * i40e_print_link_message - print link up or down
5363 * @vsi: the VSI for which link needs a message
5364 */
c156f856 5365void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
cf05ed08 5366{
7ec9ba11 5367 enum i40e_aq_link_speed new_speed;
3fded466 5368 struct i40e_pf *pf = vsi->back;
5369 char *speed = "Unknown";
5370 char *fc = "Unknown";
3e03d7cc 5371 char *fec = "";
68e49702 5372 char *req_fec = "";
3e03d7cc 5373 char *an = "";
cf05ed08 5374
3fded466 5375 new_speed = pf->hw.phy.link_info.link_speed;
5376
5377 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
5378 return;
5379 vsi->current_isup = isup;
7ec9ba11 5380 vsi->current_speed = new_speed;
5381 if (!isup) {
5382 netdev_info(vsi->netdev, "NIC Link is Down\n");
5383 return;
5384 }
5385
5386 /* Warn user if link speed on NPAR enabled partition is not at
5387 * least 10GB
5388 */
5389 if (pf->hw.func_caps.npar_enable &&
5390 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5391 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5392 netdev_warn(vsi->netdev,
5393 "The partition detected link speed that is less than 10Gbps\n");
5394
3fded466 5395 switch (pf->hw.phy.link_info.link_speed) {
cf05ed08 5396 case I40E_LINK_SPEED_40GB:
a9165490 5397 speed = "40 G";
cf05ed08 5398 break;
ae24b409 5399 case I40E_LINK_SPEED_20GB:
a9165490 5400 speed = "20 G";
ae24b409 5401 break;
5402 case I40E_LINK_SPEED_25GB:
5403 speed = "25 G";
5404 break;
cf05ed08 5405 case I40E_LINK_SPEED_10GB:
a9165490 5406 speed = "10 G";
5407 break;
5408 case I40E_LINK_SPEED_1GB:
a9165490 5409 speed = "1000 M";
cf05ed08 5410 break;
5960d33f 5411 case I40E_LINK_SPEED_100MB:
a9165490 5412 speed = "100 M";
5960d33f 5413 break;
5414 default:
5415 break;
5416 }
5417
3fded466 5418 switch (pf->hw.fc.current_mode) {
cf05ed08 5419 case I40E_FC_FULL:
a9165490 5420 fc = "RX/TX";
5421 break;
5422 case I40E_FC_TX_PAUSE:
a9165490 5423 fc = "TX";
5424 break;
5425 case I40E_FC_RX_PAUSE:
a9165490 5426 fc = "RX";
5427 break;
5428 default:
a9165490 5429 fc = "None";
5430 break;
5431 }
5432
3fded466 5433 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
68e49702 5434 req_fec = ", Requested FEC: None";
5435 fec = ", FEC: None";
5436 an = ", Autoneg: False";
5437
3fded466 5438 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
5439 an = ", Autoneg: True";
5440
3fded466 5441 if (pf->hw.phy.link_info.fec_info &
5442 I40E_AQ_CONFIG_FEC_KR_ENA)
5443 fec = ", FEC: CL74 FC-FEC/BASE-R";
3fded466 5444 else if (pf->hw.phy.link_info.fec_info &
5445 I40E_AQ_CONFIG_FEC_RS_ENA)
5446 fec = ", FEC: CL108 RS-FEC";
5447
5448 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
5449 * both RS and FC are requested
5450 */
5451 if (vsi->back->hw.phy.link_info.req_fec_info &
5452 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
5453 if (vsi->back->hw.phy.link_info.req_fec_info &
5454 I40E_AQ_REQUEST_FEC_RS)
5455 req_fec = ", Requested FEC: CL108 RS-FEC";
5456 else
5457 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
5458 }
5459 }
5460
5461 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
5462 speed, req_fec, fec, an, fc);
cf05ed08 5463}
4e3b35b0 5464
5465/**
5466 * i40e_up_complete - Finish the last steps of bringing up a connection
5467 * @vsi: the VSI being configured
5468 **/
5469static int i40e_up_complete(struct i40e_vsi *vsi)
5470{
5471 struct i40e_pf *pf = vsi->back;
5472 int err;
5473
5474 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5475 i40e_vsi_configure_msix(vsi);
5476 else
5477 i40e_configure_msi_and_legacy(vsi);
5478
5479 /* start rings */
3aa7b74d 5480 err = i40e_vsi_start_rings(vsi);
5481 if (err)
5482 return err;
5483
0da36b97 5484 clear_bit(__I40E_VSI_DOWN, vsi->state);
5485 i40e_napi_enable_all(vsi);
5486 i40e_vsi_enable_irq(vsi);
5487
5488 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5489 (vsi->netdev)) {
cf05ed08 5490 i40e_print_link_message(vsi, true);
5491 netif_tx_start_all_queues(vsi->netdev);
5492 netif_carrier_on(vsi->netdev);
5493 }
5494
5495 /* replay FDIR SB filters */
5496 if (vsi->type == I40E_VSI_FDIR) {
5497 /* reset fd counters */
5498 pf->fd_add_err = 0;
5499 pf->fd_atr_cnt = 0;
ca64fa4e 5500 i40e_fdir_filter_restore(vsi);
1e1be8f6 5501 }
5502
5503 /* On the next run of the service_task, notify any clients of the new
5504 * opened netdev
5505 */
5506 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5507 i40e_service_event_schedule(pf);
5508
5509 return 0;
5510}
5511
5512/**
5513 * i40e_vsi_reinit_locked - Reset the VSI
5514 * @vsi: the VSI being configured
5515 *
5516 * Rebuild the ring structs after some configuration
5517 * has changed, e.g. MTU size.
5518 **/
5519static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5520{
5521 struct i40e_pf *pf = vsi->back;
5522
5523 WARN_ON(in_interrupt());
0da36b97 5524 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
5525 usleep_range(1000, 2000);
5526 i40e_down(vsi);
5527
41c445ff 5528 i40e_up(vsi);
0da36b97 5529 clear_bit(__I40E_CONFIG_BUSY, pf->state);
5530}
5531
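The busy-flag protocol in i40e_vsi_reinit_locked() (test_and_set_bit in a sleep loop on entry, clear_bit on exit) is effectively a sleeping spinlock around the down/up sequence. A user-space analogue with C11 atomics, for illustration only:

#include <stdatomic.h>
#include <unistd.h>

static atomic_flag config_busy = ATOMIC_FLAG_INIT;

/* spin with a short sleep until the previous owner releases the
 * flag -- the shape of the __I40E_CONFIG_BUSY loop above */
static void config_lock(void)
{
	while (atomic_flag_test_and_set(&config_busy))
		usleep(1000);	/* stand-in for usleep_range(1000, 2000) */
}

static void config_unlock(void)
{
	atomic_flag_clear(&config_busy);
}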
5532/**
5533 * i40e_up - Bring the connection back up after being down
5534 * @vsi: the VSI being configured
5535 **/
5536int i40e_up(struct i40e_vsi *vsi)
5537{
5538 int err;
5539
5540 err = i40e_vsi_configure(vsi);
5541 if (!err)
5542 err = i40e_up_complete(vsi);
5543
5544 return err;
5545}
5546
5547/**
5548 * i40e_down - Shutdown the connection processing
5549 * @vsi: the VSI being stopped
5550 **/
5551void i40e_down(struct i40e_vsi *vsi)
5552{
5553 int i;
5554
5555 /* It is assumed that the caller of this function
d19cb64b 5556 * sets the vsi->state __I40E_VSI_DOWN bit.
5557 */
5558 if (vsi->netdev) {
5559 netif_carrier_off(vsi->netdev);
5560 netif_tx_disable(vsi->netdev);
5561 }
5562 i40e_vsi_disable_irq(vsi);
3aa7b74d 5563 i40e_vsi_stop_rings(vsi);
5564 i40e_napi_disable_all(vsi);
5565
5566 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 5567 i40e_clean_tx_ring(vsi->tx_rings[i]);
5568 if (i40e_enabled_xdp_vsi(vsi))
5569 i40e_clean_tx_ring(vsi->xdp_rings[i]);
9f65e15b 5570 i40e_clean_rx_ring(vsi->rx_rings[i]);
41c445ff 5571 }
f980d445 5572
5573}
5574
5575/**
5576 * i40e_setup_tc - configure multiple traffic classes
5577 * @netdev: net device to configure
5578 * @tc: number of traffic classes to enable
5579 **/
5580static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5581{
5582 struct i40e_netdev_priv *np = netdev_priv(netdev);
5583 struct i40e_vsi *vsi = np->vsi;
5584 struct i40e_pf *pf = vsi->back;
5585 u8 enabled_tc = 0;
5586 int ret = -EINVAL;
5587 int i;
5588
5589 /* Check if DCB enabled to continue */
5590 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5591 netdev_info(netdev, "DCB is not enabled for adapter\n");
5592 goto exit;
5593 }
5594
5595 /* Check if MFP enabled */
5596 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5597 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5598 goto exit;
5599 }
5600
5601 /* Check whether tc count is within enabled limit */
5602 if (tc > i40e_pf_get_num_tc(pf)) {
5603 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5604 goto exit;
5605 }
5606
5607 /* Generate TC map for number of tc requested */
5608 for (i = 0; i < tc; i++)
75f5cea9 5609 enabled_tc |= BIT(i);
5610
5611 /* Requesting same TC configuration as already enabled */
5612 if (enabled_tc == vsi->tc_config.enabled_tc)
5613 return 0;
5614
5615 /* Quiesce VSI queues */
5616 i40e_quiesce_vsi(vsi);
5617
5618 /* Configure VSI for enabled TCs */
5619 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5620 if (ret) {
5621 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5622 vsi->seid);
5623 goto exit;
5624 }
5625
5626 /* Unquiesce VSI */
5627 i40e_unquiesce_vsi(vsi);
5628
5629exit:
5630 return ret;
5631}
5632
2572ac53 5633static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
de4784ca 5634 void *type_data)
e4c6734e 5635{
5636 struct tc_mqprio_qopt *mqprio = type_data;
5637
2572ac53 5638 if (type != TC_SETUP_MQPRIO)
38cf0426 5639 return -EOPNOTSUPP;
56f36acd 5640
de4784ca 5641 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 5642
de4784ca 5643 return i40e_setup_tc(netdev, mqprio->num_tc);
5644}
5645
5646/**
5647 * i40e_open - Called when a network interface is made active
5648 * @netdev: network interface device structure
5649 *
5650 * The open entry point is called when a network interface is made
5651 * active by the system (IFF_UP). At this point all resources needed
5652 * for transmit and receive operations are allocated, the interrupt
5653 * handler is registered with the OS, the netdev watchdog subtask is
5654 * enabled, and the stack is notified that the interface is ready.
5655 *
5656 * Returns 0 on success, negative value on failure
5657 **/
38e00438 5658int i40e_open(struct net_device *netdev)
5659{
5660 struct i40e_netdev_priv *np = netdev_priv(netdev);
5661 struct i40e_vsi *vsi = np->vsi;
5662 struct i40e_pf *pf = vsi->back;
5663 int err;
5664
4eb3f768 5665 /* disallow open during test or if eeprom is broken */
5666 if (test_bit(__I40E_TESTING, pf->state) ||
5667 test_bit(__I40E_BAD_EEPROM, pf->state))
5668 return -EBUSY;
5669
5670 netif_carrier_off(netdev);
5671
5672 err = i40e_vsi_open(vsi);
5673 if (err)
5674 return err;
5675
5676 /* configure global TSO hardware offload settings */
5677 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5678 TCP_FLAG_FIN) >> 16);
5679 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5680 TCP_FLAG_FIN |
5681 TCP_FLAG_CWR) >> 16);
5682 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5683
06a5f7f1 5684 udp_tunnel_get_rx_info(netdev);
e3219ce6 5685
5686 return 0;
5687}
5688
5689/**
5690 * i40e_vsi_open - finish initialization and bring the VSI up
5691 * @vsi: the VSI to open
5692 *
5693 * Finish initialization of the VSI.
5694 *
5695 * Returns 0 on success, negative value on failure
5696 *
5697 * Note: expects to be called while under rtnl_lock()
5698 **/
5699int i40e_vsi_open(struct i40e_vsi *vsi)
5700{
5701 struct i40e_pf *pf = vsi->back;
b294ac70 5702 char int_name[I40E_INT_NAME_STR_LEN];
5703 int err;
5704
5705 /* allocate descriptors */
5706 err = i40e_vsi_setup_tx_resources(vsi);
5707 if (err)
5708 goto err_setup_tx;
5709 err = i40e_vsi_setup_rx_resources(vsi);
5710 if (err)
5711 goto err_setup_rx;
5712
5713 err = i40e_vsi_configure(vsi);
5714 if (err)
5715 goto err_setup_rx;
5716
5717 if (vsi->netdev) {
5718 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5719 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5720 err = i40e_vsi_request_irq(vsi, int_name);
5721 if (err)
5722 goto err_setup_rx;
41c445ff 5723
5724 /* Notify the stack of the actual queue counts. */
5725 err = netif_set_real_num_tx_queues(vsi->netdev,
5726 vsi->num_queue_pairs);
5727 if (err)
5728 goto err_set_queues;
25946ddb 5729
5730 err = netif_set_real_num_rx_queues(vsi->netdev,
5731 vsi->num_queue_pairs);
5732 if (err)
5733 goto err_set_queues;
5734
5735 } else if (vsi->type == I40E_VSI_FDIR) {
e240f674 5736 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5737 dev_driver_string(&pf->pdev->dev),
5738 dev_name(&pf->pdev->dev));
8a9eb7d3 5739 err = i40e_vsi_request_irq(vsi, int_name);
b2008cbf 5740
c22e3c6c 5741 } else {
ce9ccb17 5742 err = -EINVAL;
5743 goto err_setup_rx;
5744 }
25946ddb 5745
5746 err = i40e_up_complete(vsi);
5747 if (err)
5748 goto err_up_complete;
5749
5750 return 0;
5751
5752err_up_complete:
5753 i40e_down(vsi);
25946ddb 5754err_set_queues:
5755 i40e_vsi_free_irq(vsi);
5756err_setup_rx:
5757 i40e_vsi_free_rx_resources(vsi);
5758err_setup_tx:
5759 i40e_vsi_free_tx_resources(vsi);
5760 if (vsi == pf->vsi[pf->lan_vsi])
373149fc 5761 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
5762
5763 return err;
5764}
5765
5766/**
5767 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
b40c82e6 5768 * @pf: Pointer to PF
5769 *
5770 * This function destroys the hlist where all the Flow Director
5771 * filters were saved.
5772 **/
5773static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5774{
5775 struct i40e_fdir_filter *filter;
0e588de1 5776 struct i40e_flex_pit *pit_entry, *tmp;
5777 struct hlist_node *node2;
5778
5779 hlist_for_each_entry_safe(filter, node2,
5780 &pf->fdir_filter_list, fdir_node) {
5781 hlist_del(&filter->fdir_node);
5782 kfree(filter);
5783 }
097dbf52 5784
5785 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
5786 list_del(&pit_entry->list);
5787 kfree(pit_entry);
5788 }
5789 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
5790
5791 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
5792 list_del(&pit_entry->list);
5793 kfree(pit_entry);
5794 }
5795 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
5796
17a73f6b 5797 pf->fdir_pf_active_filters = 0;
5798 pf->fd_tcp4_filter_cnt = 0;
5799 pf->fd_udp4_filter_cnt = 0;
f223c875 5800 pf->fd_sctp4_filter_cnt = 0;
097dbf52 5801 pf->fd_ip4_filter_cnt = 0;
5802
5803 /* Reprogram the default input set for TCP/IPv4 */
5804 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5805 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5806 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5807
5808 /* Reprogram the default input set for UDP/IPv4 */
5809 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5810 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5811 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5812
5813 /* Reprogram the default input set for SCTP/IPv4 */
5814 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5815 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5816 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5817
5818 /* Reprogram the default input set for Other/IPv4 */
5819 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5820 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5821}
5822
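The hlist_for_each_entry_safe()/list_for_each_entry_safe() walks above are the canonical pattern for freeing a list while iterating it: the next pointer is cached before the current node is released. The same discipline on a plain singly linked list, as a stand-alone C sketch:

#include <stdlib.h>

struct filter {
	int id;
	struct filter *next;
};

/* free every node, grabbing the successor before the current node
 * is released -- what the *_safe iterators do for you */
static void filter_list_exit(struct filter **head)
{
	struct filter *f = *head, *tmp;

	while (f) {
		tmp = f->next;
		free(f);
		f = tmp;
	}
	*head = NULL;
}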
5823/**
5824 * i40e_close - Disables a network interface
5825 * @netdev: network interface device structure
5826 *
5827 * The close entry point is called when an interface is de-activated
5828 * by the OS. The hardware is still under the driver's control, but
5829 * this netdev interface is disabled.
5830 *
5831 * Returns 0, this is not allowed to fail
5832 **/
38e00438 5833int i40e_close(struct net_device *netdev)
5834{
5835 struct i40e_netdev_priv *np = netdev_priv(netdev);
5836 struct i40e_vsi *vsi = np->vsi;
5837
90ef8d47 5838 i40e_vsi_close(vsi);
5839
5840 return 0;
5841}
5842
5843/**
5844 * i40e_do_reset - Start a PF or Core Reset sequence
5845 * @pf: board private structure
5846 * @reset_flags: which reset is requested
5847 * @lock_acquired: indicates whether or not the lock has been acquired
5848 * before this function was called.
5849 *
5850 * The essential difference in resets is that the PF Reset
5851 * doesn't clear the packet buffers, doesn't reset the PE
5852 * firmware, and doesn't bother the other PFs on the chip.
5853 **/
373149fc 5854void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
5855{
5856 u32 val;
5857
5858 WARN_ON(in_interrupt());
5859
263fc48f 5860
41c445ff 5861 /* do the biggest reset indicated */
41a1d04b 5862 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5863
5864 /* Request a Global Reset
5865 *
5866 * This will start the chip's countdown to the actual full
5867 * chip reset event, and a warning interrupt to be sent
5868 * to all PFs, including the requestor. Our handler
5869 * for the warning interrupt will deal with the shutdown
5870 * and recovery of the switch setup.
5871 */
69bfb110 5872 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5873 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5874 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5875 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5876
41a1d04b 5877 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5878
5879 /* Request a Core Reset
5880 *
5881 * Same as Global Reset, except does *not* include the MAC/PHY
5882 */
69bfb110 5883 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5884 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5885 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5886 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5887 i40e_flush(&pf->hw);
5888
41a1d04b 5889 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5890
5891 /* Request a PF Reset
5892 *
5893 * Resets only the PF-specific registers
5894 *
5895 * This goes directly to the tear-down and rebuild of
5896 * the switch, since we need to do all the recovery as
5897 * for the Core Reset.
5898 */
69bfb110 5899 dev_dbg(&pf->pdev->dev, "PFR requested\n");
373149fc 5900 i40e_handle_reset_warning(pf, lock_acquired);
41c445ff 5901
41a1d04b 5902 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5903 int v;
5904
5905 /* Find the VSI(s) that requested a re-init */
5906 dev_info(&pf->pdev->dev,
5907 "VSI reinit requested\n");
505682cd 5908 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff 5909 struct i40e_vsi *vsi = pf->vsi[v];
6995b36c 5910
41c445ff 5911 if (vsi != NULL &&
d19cb64b 5912 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
0da36b97 5913 vsi->state))
41c445ff 5914 i40e_vsi_reinit_locked(pf->vsi[v]);
41c445ff 5915 }
41a1d04b 5916 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5917 int v;
5918
5919 /* Find the VSI(s) that needs to be brought down */
5920 dev_info(&pf->pdev->dev, "VSI down requested\n");
5921 for (v = 0; v < pf->num_alloc_vsi; v++) {
5922 struct i40e_vsi *vsi = pf->vsi[v];
6995b36c 5923
b5d06f05 5924 if (vsi != NULL &&
d19cb64b 5925 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
5926 vsi->state)) {
5927 set_bit(__I40E_VSI_DOWN, vsi->state);
b5d06f05 5928 i40e_down(vsi);
5929 }
5930 }
5931 } else {
5932 dev_info(&pf->pdev->dev,
5933 "bad reset request 0x%08x\n", reset_flags);
5934 }
5935}
5936
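As a usage note: callers that do not already hold the rtnl lock go through i40e_do_reset_safe() (defined further below), so a PF reset request from such a context looks like this hedged one-line sketch, mirroring the flag usage in i40e_vsi_open()'s error path:

/* request a PF reset without holding rtnl_lock */
i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));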
5937#ifdef CONFIG_I40E_DCB
5938/**
5939 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5940 * @pf: board private structure
5941 * @old_cfg: current DCB config
5942 * @new_cfg: new DCB config
5943 **/
5944bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5945 struct i40e_dcbx_config *old_cfg,
5946 struct i40e_dcbx_config *new_cfg)
5947{
5948 bool need_reconfig = false;
5949
5950 /* Check if ETS configuration has changed */
5951 if (memcmp(&new_cfg->etscfg,
5952 &old_cfg->etscfg,
5953 sizeof(new_cfg->etscfg))) {
5954 /* If Priority Table has changed reconfig is needed */
5955 if (memcmp(&new_cfg->etscfg.prioritytable,
5956 &old_cfg->etscfg.prioritytable,
5957 sizeof(new_cfg->etscfg.prioritytable))) {
5958 need_reconfig = true;
69bfb110 5959 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5960 }
5961
5962 if (memcmp(&new_cfg->etscfg.tcbwtable,
5963 &old_cfg->etscfg.tcbwtable,
5964 sizeof(new_cfg->etscfg.tcbwtable)))
69bfb110 5965 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5966
5967 if (memcmp(&new_cfg->etscfg.tsatable,
5968 &old_cfg->etscfg.tsatable,
5969 sizeof(new_cfg->etscfg.tsatable)))
69bfb110 5970 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5971 }
5972
5973 /* Check if PFC configuration has changed */
5974 if (memcmp(&new_cfg->pfc,
5975 &old_cfg->pfc,
5976 sizeof(new_cfg->pfc))) {
5977 need_reconfig = true;
69bfb110 5978 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5979 }
5980
5981 /* Check if APP Table has changed */
5982 if (memcmp(&new_cfg->app,
5983 &old_cfg->app,
3d9667a9 5984 sizeof(new_cfg->app))) {
4e3b35b0 5985 need_reconfig = true;
69bfb110 5986 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
3d9667a9 5987 }
4e3b35b0 5988
fb43201f 5989 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5990 return need_reconfig;
5991}
5992
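The change detection above is plain memcmp() per configuration section, comparing the cached config against the freshly queried one. Reduced to its essence (stand-alone C; the config struct is hypothetical):

#include <stdbool.h>
#include <string.h>

struct ets_config {
	unsigned char prioritytable[8];
	unsigned char tcbwtable[8];
};

/* any byte difference between old and new config means reconfig */
static bool ets_need_reconfig(const struct ets_config *old_cfg,
			      const struct ets_config *new_cfg)
{
	return memcmp(old_cfg, new_cfg, sizeof(*old_cfg)) != 0;
}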
5993/**
5994 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5995 * @pf: board private structure
5996 * @e: event info posted on ARQ
5997 **/
5998static int i40e_handle_lldp_event(struct i40e_pf *pf,
5999 struct i40e_arq_event_info *e)
6000{
6001 struct i40e_aqc_lldp_get_mib *mib =
6002 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
6003 struct i40e_hw *hw = &pf->hw;
4e3b35b0
NP
6004 struct i40e_dcbx_config tmp_dcbx_cfg;
6005 bool need_reconfig = false;
6006 int ret = 0;
6007 u8 type;
6008
4d9b6043 6009 /* Not DCB capable or capability disabled */
ea6acb7e 6010 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
6011 return ret;
6012
6013 /* Ignore if event is not for Nearest Bridge */
6014 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
6015 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
fb43201f 6016 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
6017 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
6018 return ret;
6019
6020 /* Check MIB Type and return if event for Remote MIB update */
6021 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9fa61dd2 6022 dev_dbg(&pf->pdev->dev,
fb43201f 6023 "LLDP event mib type %s\n", type ? "remote" : "local");
6024 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
6025 /* Update the remote cached instance and return */
6026 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
6027 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
6028 &hw->remote_dcbx_config);
6029 goto exit;
6030 }
6031
9fa61dd2 6032 /* Store the old configuration */
1a2f6248 6033 tmp_dcbx_cfg = hw->local_dcbx_config;
9fa61dd2 6034
6035 /* Reset the old DCBx configuration data */
6036 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
6037 /* Get updated DCBX data from firmware */
6038 ret = i40e_get_dcb_config(&pf->hw);
4e3b35b0 6039 if (ret) {
6040 dev_info(&pf->pdev->dev,
6041 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
6042 i40e_stat_str(&pf->hw, ret),
6043 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6044 goto exit;
6045 }
6046
6047 /* No change detected in DCBX configs */
6048 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
6049 sizeof(tmp_dcbx_cfg))) {
69bfb110 6050 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
6051 goto exit;
6052 }
6053
6054 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
6055 &hw->local_dcbx_config);
4e3b35b0 6056
750fcbcf 6057 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
6058
6059 if (!need_reconfig)
6060 goto exit;
6061
4d9b6043 6062 /* Enable DCB tagging only when more than one TC */
750fcbcf 6063 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6064 pf->flags |= I40E_FLAG_DCB_ENABLED;
6065 else
6066 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6067
0da36b97 6068 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6069 /* Reconfiguration needed quiesce all VSIs */
6070 i40e_pf_quiesce_all_vsi(pf);
6071
6072 /* Changes in configuration update VEB/VSI */
6073 i40e_dcb_reconfigure(pf);
6074
6075 ret = i40e_resume_port_tx(pf);
6076
0da36b97 6077 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
2fd75f31 6078 /* In case of error no point in resuming VSIs */
6079 if (ret)
6080 goto exit;
6081
6082 /* Wait for the PF's queues to be disabled */
6083 ret = i40e_pf_wait_queues_disabled(pf);
6084 if (ret) {
6085 /* Schedule PF reset to recover */
0da36b97 6086 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6087 i40e_service_event_schedule(pf);
6088 } else {
2fd75f31 6089 i40e_pf_unquiesce_all_vsi(pf);
6090 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
6091 I40E_FLAG_CLIENT_L2_CHANGE);
6092 }
6093
6094exit:
6095 return ret;
6096}
6097#endif /* CONFIG_I40E_DCB */
6098
6099/**
6100 * i40e_do_reset_safe - Protected reset path for userland calls.
6101 * @pf: board private structure
6102 * @reset_flags: which reset is requested
6103 *
6104 **/
6105void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
6106{
6107 rtnl_lock();
373149fc 6108 i40e_do_reset(pf, reset_flags, true);
6109 rtnl_unlock();
6110}
6111
6112/**
6113 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
6114 * @pf: board private structure
6115 * @e: event info posted on ARQ
6116 *
6117 * Handler for LAN Queue Overflow Event generated by the firmware for PF
6118 * and VF queues
6119 **/
6120static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
6121 struct i40e_arq_event_info *e)
6122{
6123 struct i40e_aqc_lan_overflow *data =
6124 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
6125 u32 queue = le32_to_cpu(data->prtdcb_rupto);
6126 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
6127 struct i40e_hw *hw = &pf->hw;
6128 struct i40e_vf *vf;
6129 u16 vf_id;
6130
6131 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
6132 queue, qtx_ctl);
6133
6134 /* Queue belongs to VF, find the VF and issue VF reset */
6135 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
6136 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
6137 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
6138 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
6139 vf_id -= hw->func_caps.vf_base_id;
6140 vf = &pf->vf[vf_id];
6141 i40e_vc_notify_vf_reset(vf);
6142 /* Allow VF to process pending reset notification */
6143 msleep(20);
6144 i40e_reset_vf(vf, false);
6145 }
6146}
6147
55a5e60b 6148/**
6149 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
6150 * @pf: board private structure
6151 **/
04294e38 6152u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
12957388 6153{
04294e38 6154 u32 val, fcnt_prog;
6155
6156 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6157 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
6158 return fcnt_prog;
6159}
6160
6161/**
04294e38 6162 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
6163 * @pf: board private structure
6164 **/
04294e38 6165u32 i40e_get_current_fd_count(struct i40e_pf *pf)
55a5e60b 6166{
6167 u32 val, fcnt_prog;
6168
6169 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6170 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
6171 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
6172 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
6173 return fcnt_prog;
6174}
1e1be8f6 6175
6176/**
6177 * i40e_get_global_fd_count - Get total FD filters programmed on device
6178 * @pf: board private structure
6179 **/
6180u32 i40e_get_global_fd_count(struct i40e_pf *pf)
6181{
6182 u32 val, fcnt_prog;
6183
6184 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
6185 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
6186 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
6187 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6188 return fcnt_prog;
6189}
6190
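All three FD counter helpers share one mask-and-shift idiom: the guaranteed and best-effort filter counts live in separate bitfields of a single register, and the total is their sum. A stand-alone sketch with hypothetical field positions (the real layouts are in the GUARANT/BEST mask definitions):

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: guaranteed count in bits 0..12,
 * best-effort count in bits 16..28 */
#define GUARANT_CNT_MASK 0x00001FFFu
#define BEST_CNT_MASK    0x1FFF0000u
#define BEST_CNT_SHIFT   16

int main(void)
{
	uint32_t val = 0x00240010;	/* example register value */
	uint32_t fcnt_prog = (val & GUARANT_CNT_MASK) +
			     ((val & BEST_CNT_MASK) >> BEST_CNT_SHIFT);

	/* 0x10 guaranteed + 0x24 best effort = 52 filters */
	printf("programmed filters: %u\n", (unsigned)fcnt_prog);
	return 0;
}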
6191/**
6192 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
6193 * @pf: board private structure
6194 **/
6195void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6196{
3487b6c3 6197 struct i40e_fdir_filter *filter;
55a5e60b 6198 u32 fcnt_prog, fcnt_avail;
3487b6c3 6199 struct hlist_node *node;
55a5e60b 6200
0da36b97 6201 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6202 return;
6203
47994c11 6204 /* Check if we have enough room to re-enable FDir SB capability. */
04294e38 6205 fcnt_prog = i40e_get_global_fd_count(pf);
12957388 6206 fcnt_avail = pf->fdir_pf_filter_count;
1e1be8f6
ASJ
6207 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
6208 (pf->fd_add_err == 0) ||
6209 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6210 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
6211 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
6212 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6213 (I40E_DEBUG_FD & pf->hw.debug_mask))
2e4875e3 6214 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6215 }
6216 }
a3417d28 6217
6218 /* We should wait for even more space before re-enabling ATR.
6219 * Additionally, we cannot enable ATR as long as we still have TCP SB
6220 * rules active.
a3417d28 6221 */
6222 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
6223 (pf->fd_tcp4_filter_cnt == 0)) {
6224 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
6225 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
6226 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6227 (I40E_DEBUG_FD & pf->hw.debug_mask))
a3417d28 6228 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6229 }
6230 }
6231
6232 /* if hw had a problem adding a filter, delete it */
6233 if (pf->fd_inv > 0) {
6234 hlist_for_each_entry_safe(filter, node,
6235 &pf->fdir_filter_list, fdir_node) {
6236 if (filter->fd_id == pf->fd_inv) {
6237 hlist_del(&filter->fdir_node);
6238 kfree(filter);
6239 pf->fdir_pf_active_filters--;
6240 }
6241 }
6242 }
6243}
6244
1e1be8f6 6245#define I40E_MIN_FD_FLUSH_INTERVAL 10
04294e38 6246#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
6247/**
6248 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
6249 * @pf: board private structure
6250 **/
6251static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6252{
04294e38 6253 unsigned long min_flush_time;
1e1be8f6 6254 int flush_wait_retry = 50;
6255 bool disable_atr = false;
6256 int fd_room;
6257 int reg;
6258
6259 if (!time_after(jiffies, pf->fd_flush_timestamp +
6260 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
6261 return;
04294e38 6262
6263 /* If the flush is happening too quick and we have mostly SB rules we
6264 * should not re-enable ATR for some time.
6265 */
6266 min_flush_time = pf->fd_flush_timestamp +
6267 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
6268 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
6269
6270 if (!(time_after(jiffies, min_flush_time)) &&
6271 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
6272 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6273 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6274 disable_atr = true;
6275 }
6276
6277 pf->fd_flush_timestamp = jiffies;
47994c11 6278 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
6279 /* flush all filters */
6280 wr32(&pf->hw, I40E_PFQF_CTL_1,
6281 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6282 i40e_flush(&pf->hw);
6283 pf->fd_flush_cnt++;
6284 pf->fd_add_err = 0;
6285 do {
6286 /* Check FD flush status every 5-6msec */
6287 usleep_range(5000, 6000);
6288 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6289 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6290 break;
6291 } while (flush_wait_retry--);
6292 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6293 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6294 } else {
6295 /* replay sideband filters */
6296 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
097dbf52 6297 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
47994c11 6298 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
0da36b97 6299 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
6300 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6301 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6302 }
6303}
6304
6305/**
6306 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6307 * @pf: board private structure
6308 **/
04294e38 6309u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6310{
6311 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6312}
6313
6314/* We can see up to 256 filter programming descriptors in transit if the
6315 * filters are being applied really fast, before we see the first
6316 * filter miss error on Rx queue 0. Accumulating enough error messages
6317 * before reacting makes sure we don't trigger a flush too often.
6318 */
6319#define I40E_MAX_FD_PROGRAM_ERROR 256
6320
6321/**
6322 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6323 * @pf: board private structure
6324 **/
6325static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6326{
41c445ff 6327
41c445ff 6328 /* if interface is down do nothing */
9e6c9c0f 6329 if (test_bit(__I40E_DOWN, pf->state))
41c445ff 6330 return;
1e1be8f6 6331
0da36b97 6332 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6333 i40e_fdir_flush_and_replay(pf);
6334
6335 i40e_fdir_check_and_reenable(pf);
6336
6337}
6338
6339/**
6340 * i40e_vsi_link_event - notify VSI of a link event
6341 * @vsi: vsi to be notified
6342 * @link_up: link up or down
6343 **/
6344static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6345{
0da36b97 6346 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
6347 return;
6348
6349 switch (vsi->type) {
6350 case I40E_VSI_MAIN:
6351 if (!vsi->netdev || !vsi->netdev_registered)
6352 break;
6353
6354 if (link_up) {
6355 netif_carrier_on(vsi->netdev);
6356 netif_tx_wake_all_queues(vsi->netdev);
6357 } else {
6358 netif_carrier_off(vsi->netdev);
6359 netif_tx_stop_all_queues(vsi->netdev);
6360 }
6361 break;
6362
6363 case I40E_VSI_SRIOV:
6364 case I40E_VSI_VMDQ2:
6365 case I40E_VSI_CTRL:
e3219ce6 6366 case I40E_VSI_IWARP:
6367 case I40E_VSI_MIRROR:
6368 default:
6369 /* there is no notification for other VSIs */
6370 break;
6371 }
6372}
6373
6374/**
6375 * i40e_veb_link_event - notify elements on the veb of a link event
6376 * @veb: veb to be notified
6377 * @link_up: link up or down
6378 **/
6379static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6380{
6381 struct i40e_pf *pf;
6382 int i;
6383
6384 if (!veb || !veb->pf)
6385 return;
6386 pf = veb->pf;
6387
6388 /* depth first... */
6389 for (i = 0; i < I40E_MAX_VEB; i++)
6390 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6391 i40e_veb_link_event(pf->veb[i], link_up);
6392
6393 /* ... now the local VSIs */
505682cd 6394 for (i = 0; i < pf->num_alloc_vsi; i++)
6395 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6396 i40e_vsi_link_event(pf->vsi[i], link_up);
6397}
6398
6399/**
6400 * i40e_link_event - Update netif_carrier status
6401 * @pf: board private structure
6402 **/
6403static void i40e_link_event(struct i40e_pf *pf)
6404{
320684cd 6405 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
fef59ddf 6406 u8 new_link_speed, old_link_speed;
6407 i40e_status status;
6408 bool new_link, old_link;
41c445ff 6409
6410 /* save off old link status information */
6411 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6412
6413 /* set this to force the get_link_status call to refresh state */
6414 pf->hw.phy.get_link_info = true;
6415
41c445ff 6416 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6417
6418 status = i40e_get_link_status(&pf->hw, &new_link);
6419
6420 /* On success, disable temp link polling */
6421 if (status == I40E_SUCCESS) {
6422 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
6423 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
6424 } else {
6425 /* Enable link polling temporarily until i40e_get_link_status
6426 * returns I40E_SUCCESS
6427 */
6428 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
6429 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6430 status);
6431 return;
6432 }
6433
6434 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6435 new_link_speed = pf->hw.phy.link_info.link_speed;
41c445ff 6436
1e701e09 6437 if (new_link == old_link &&
fef59ddf 6438 new_link_speed == old_link_speed &&
0da36b97 6439 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
320684cd 6440 new_link == netif_carrier_ok(vsi->netdev)))
41c445ff 6441 return;
320684cd 6442
9a03449d 6443 i40e_print_link_message(vsi, new_link);
6444
6445 /* Notify the base of the switch tree connected to
6446 * the link. Floating VEBs are not notified.
6447 */
6448 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6449 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6450 else
320684cd 6451 i40e_vsi_link_event(vsi, new_link);
6452
6453 if (pf->vf)
6454 i40e_vc_notify_link_state(pf);
6455
6456 if (pf->flags & I40E_FLAG_PTP)
6457 i40e_ptp_set_increment(pf);
6458}
6459
41c445ff 6460/**
21536717 6461 * i40e_watchdog_subtask - periodic checks not using event driven response
6462 * @pf: board private structure
6463 **/
6464static void i40e_watchdog_subtask(struct i40e_pf *pf)
6465{
6466 int i;
6467
6468 /* if interface is down do nothing */
9e6c9c0f 6469 if (test_bit(__I40E_DOWN, pf->state) ||
0da36b97 6470 test_bit(__I40E_CONFIG_BUSY, pf->state))
6471 return;
6472
6473 /* make sure we don't do these things too often */
6474 if (time_before(jiffies, (pf->service_timer_previous +
6475 pf->service_timer_period)))
6476 return;
6477 pf->service_timer_previous = jiffies;
6478
6479 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
6480 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
9ac77266 6481 i40e_link_event(pf);
21536717 6482
6483 /* Update the stats for active netdevs so the network stack
6484 * can look at updated numbers whenever it cares to
6485 */
505682cd 6486 for (i = 0; i < pf->num_alloc_vsi; i++)
6487 if (pf->vsi[i] && pf->vsi[i]->netdev)
6488 i40e_update_stats(pf->vsi[i]);
6489
6490 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6491 /* Update the stats for the active switching components */
6492 for (i = 0; i < I40E_MAX_VEB; i++)
6493 if (pf->veb[i])
6494 i40e_update_veb_stats(pf->veb[i]);
6495 }
beb0dff1 6496
61189556 6497 i40e_ptp_rx_hang(pf);
0bc0706b 6498 i40e_ptp_tx_hang(pf);
6499}
6500
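The jiffies guard at the top of the watchdog is a rate limiter: the pass is skipped unless a full service-timer period has elapsed since the previous run. The same pattern against a monotonic clock, as a user-space C sketch:

#include <stdbool.h>
#include <time.h>

/* return true (skip the run) unless period_sec has elapsed since
 * *prev; on a full period, record the new timestamp and run */
static bool rate_limited(struct timespec *prev, double period_sec)
{
	struct timespec now;
	double delta;

	clock_gettime(CLOCK_MONOTONIC, &now);
	delta = (double)(now.tv_sec - prev->tv_sec) +
		(double)(now.tv_nsec - prev->tv_nsec) / 1e9;
	if (delta < period_sec)
		return true;
	*prev = now;
	return false;
}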
6501/**
6502 * i40e_reset_subtask - Set up for resetting the device and driver
6503 * @pf: board private structure
6504 **/
6505static void i40e_reset_subtask(struct i40e_pf *pf)
6506{
6507 u32 reset_flags = 0;
6508
0da36b97 6509 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
75f5cea9 6510 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
0da36b97 6511 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
41c445ff 6512 }
0da36b97 6513 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
75f5cea9 6514 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
0da36b97 6515 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
41c445ff 6516 }
0da36b97 6517 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
75f5cea9 6518 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
0da36b97 6519 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
41c445ff 6520 }
0da36b97 6521 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
75f5cea9 6522 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
0da36b97 6523 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
41c445ff 6524 }
6525 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6526 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6527 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
b5d06f05 6528 }
6529
6530 /* If there's a recovery already waiting, it takes
6531 * precedence before starting a new reset sequence.
6532 */
0da36b97 6533 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
6534 i40e_prep_for_reset(pf, false);
6535 i40e_reset(pf);
6536 i40e_rebuild(pf, false, false);
6537 }
6538
6539 /* If we're already down or resetting, just bail */
6540 if (reset_flags &&
9e6c9c0f 6541 !test_bit(__I40E_DOWN, pf->state) &&
0da36b97 6542 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
dfc4ff64 6543 i40e_do_reset(pf, reset_flags, false);
373149fc 6544 }
6545}
6546
6547/**
6548 * i40e_handle_link_event - Handle link event
6549 * @pf: board private structure
6550 * @e: event info posted on ARQ
6551 **/
6552static void i40e_handle_link_event(struct i40e_pf *pf,
6553 struct i40e_arq_event_info *e)
6554{
6555 struct i40e_aqc_get_link_status *status =
6556 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
41c445ff 6557
6558 /* Do a new status request to re-enable LSE reporting
6559 * and load new status information into the hw struct
6560 * This completely ignores any state information
6561 * in the ARQ event info, instead choosing to always
6562 * issue the AQ update link status command.
6563 */
6564 i40e_link_event(pf);
6565
6566 /* check for unqualified module, if link is down */
6567 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6568 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6569 (!(status->link_info & I40E_AQ_LINK_UP)))
6570 dev_err(&pf->pdev->dev,
6571 "The driver failed to link because an unqualified module was detected.\n");
6572}
6573
6574/**
6575 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6576 * @pf: board private structure
6577 **/
6578static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6579{
6580 struct i40e_arq_event_info event;
6581 struct i40e_hw *hw = &pf->hw;
6582 u16 pending, i = 0;
6583 i40e_status ret;
6584 u16 opcode;
86df242b 6585 u32 oldval;
6586 u32 val;
6587
a316f651 6588 /* Do not run clean AQ when PF reset fails */
0da36b97 6589 if (test_bit(__I40E_RESET_FAILED, pf->state))
6590 return;
6591
6592 /* check for error indications */
6593 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6594 oldval = val;
6595 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6596 if (hw->debug_mask & I40E_DEBUG_AQ)
6597 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6598 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6599 }
6600 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6601 if (hw->debug_mask & I40E_DEBUG_AQ)
6602 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
86df242b 6603 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
1d0a4ada 6604 pf->arq_overflows++;
6605 }
6606 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6607 if (hw->debug_mask & I40E_DEBUG_AQ)
6608 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6609 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6610 }
6611 if (oldval != val)
6612 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6613
6614 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6615 oldval = val;
6616 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6617 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6618 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6619 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6620 }
6621 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6622 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6623 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6624 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6625 }
6626 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6627 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6628 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6629 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6630 }
6631 if (oldval != val)
6632 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6633
6634 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6635 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6636 if (!event.msg_buf)
6637 return;
6638
6639 do {
6640 ret = i40e_clean_arq_element(hw, &event, &pending);
56497978 6641 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
41c445ff 6642 break;
56497978 6643 else if (ret) {
6644 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6645 break;
6646 }
6647
6648 opcode = le16_to_cpu(event.desc.opcode);
6649 switch (opcode) {
6650
6651 case i40e_aqc_opc_get_link_status:
6652 i40e_handle_link_event(pf, &event);
6653 break;
6654 case i40e_aqc_opc_send_msg_to_pf:
6655 ret = i40e_vc_process_vf_msg(pf,
6656 le16_to_cpu(event.desc.retval),
6657 le32_to_cpu(event.desc.cookie_high),
6658 le32_to_cpu(event.desc.cookie_low),
6659 event.msg_buf,
6660 event.msg_len);
6661 break;
6662 case i40e_aqc_opc_lldp_update_mib:
6663 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6664#ifdef CONFIG_I40E_DCB
6665 rtnl_lock();
6666 ret = i40e_handle_lldp_event(pf, &event);
6667 rtnl_unlock();
6668#endif /* CONFIG_I40E_DCB */
6669 break;
6670 case i40e_aqc_opc_event_lan_overflow:
6671 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6672 i40e_handle_lan_overflow_event(pf, &event);
6673 break;
6674 case i40e_aqc_opc_send_msg_to_peer:
6675 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6676 break;
6677 case i40e_aqc_opc_nvm_erase:
6678 case i40e_aqc_opc_nvm_update:
6679 case i40e_aqc_opc_oem_post_update:
6680 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6681 "ARQ NVM operation 0x%04x completed\n",
6682 opcode);
6683 break;
6684 default:
6685 dev_info(&pf->pdev->dev,
6686 "ARQ: Unknown event 0x%04x ignored\n",
6687 opcode);
6688 break;
6689 }
6690 } while (i++ < pf->adminq_work_limit);
6691
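/* Worked detail (from the surrounding logic): if the loop above hit the
 * work limit before the ARQ drained, __I40E_ADMINQ_EVENT_PENDING is
 * deliberately left set, and the service task sees that bit and
 * reschedules itself to finish the remaining events.
 */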
6692 if (i < pf->adminq_work_limit)
6693 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
6694
6695 /* re-enable Admin queue interrupt cause */
6696 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6697 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6698 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6699 i40e_flush(hw);
6700
6701 kfree(event.msg_buf);
6702}
6703
6704/**
6705 * i40e_verify_eeprom - make sure eeprom is good to use
6706 * @pf: board private structure
6707 **/
6708static void i40e_verify_eeprom(struct i40e_pf *pf)
6709{
6710 int err;
6711
6712 err = i40e_diag_eeprom_test(&pf->hw);
6713 if (err) {
6714 /* retry in case of garbage read */
6715 err = i40e_diag_eeprom_test(&pf->hw);
6716 if (err) {
6717 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6718 err);
6719 set_bit(__I40E_BAD_EEPROM, pf->state);
6720 }
6721 }
6722
6723 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
6724 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6725 clear_bit(__I40E_BAD_EEPROM, pf->state);
6726 }
6727}
6728
6729 /**
6730 * i40e_enable_pf_switch_lb
6731 * @pf: pointer to the PF structure
6732 *
6733 * enable switch loop back or die - no point in a return value
6734 **/
6735static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6736{
6737 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6738 struct i40e_vsi_context ctxt;
6739 int ret;
6740
6741 ctxt.seid = pf->main_vsi_seid;
6742 ctxt.pf_num = pf->hw.pf_id;
6743 ctxt.vf_num = 0;
6744 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6745 if (ret) {
6746 dev_info(&pf->pdev->dev,
6747 "couldn't get PF vsi config, err %s aq_err %s\n",
6748 i40e_stat_str(&pf->hw, ret),
6749 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6750 return;
6751 }
6752 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6753 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6754 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6755
6756 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6757 if (ret) {
6758 dev_info(&pf->pdev->dev,
6759 "update vsi switch failed, err %s aq_err %s\n",
6760 i40e_stat_str(&pf->hw, ret),
6761 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6762 }
6763}
6764
6765/**
6766 * i40e_disable_pf_switch_lb
6767 * @pf: pointer to the PF structure
6768 *
6769 * disable switch loop back or die - no point in a return value
6770 **/
6771static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6772{
6773 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6774 struct i40e_vsi_context ctxt;
6775 int ret;
6776
6777 ctxt.seid = pf->main_vsi_seid;
6778 ctxt.pf_num = pf->hw.pf_id;
6779 ctxt.vf_num = 0;
6780 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6781 if (ret) {
6782 dev_info(&pf->pdev->dev,
6783 "couldn't get PF vsi config, err %s aq_err %s\n",
6784 i40e_stat_str(&pf->hw, ret),
6785 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6786 return;
6787 }
6788 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6789 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6790 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6791
6792 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6793 if (ret) {
6794 dev_info(&pf->pdev->dev,
6795 "update vsi switch failed, err %s aq_err %s\n",
6796 i40e_stat_str(&pf->hw, ret),
6797 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6798 }
6799}
6800
6801/**
6802 * i40e_config_bridge_mode - Configure the HW bridge mode
6803 * @veb: pointer to the bridge instance
6804 *
6805 * Configure the loop back mode for the LAN VSI that is downlink to the
6806 * specified HW bridge instance. It is expected this function is called
6807 * when a new HW bridge is instantiated.
6808 **/
6809static void i40e_config_bridge_mode(struct i40e_veb *veb)
6810{
6811 struct i40e_pf *pf = veb->pf;
6812
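/* Background note (general VEPA/VEB semantics, matching the branch
 * below): in VEPA mode all VM-to-VM traffic is hairpinned through the
 * external switch, so the PF's internal loopback must be disabled; in
 * VEB mode the HW bridge switches locally, which requires loopback on.
 */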
6813 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6814 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6815 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6816 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6817 i40e_disable_pf_switch_lb(pf);
6818 else
6819 i40e_enable_pf_switch_lb(pf);
6820}
6821
6822/**
6823 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6824 * @veb: pointer to the VEB instance
6825 *
6826 * This is a recursive function that first builds the attached VSIs, then
6827 * recurses in to build the next layer of VEBs. We track the connections
6828 * through our own index numbers because the SEIDs from the HW could
6829 * change across a reset.
6830 **/
6831static int i40e_reconstitute_veb(struct i40e_veb *veb)
6832{
6833 struct i40e_vsi *ctl_vsi = NULL;
6834 struct i40e_pf *pf = veb->pf;
6835 int v, veb_idx;
6836 int ret;
6837
6838 /* build VSI that owns this VEB, temporarily attached to base VEB */
6839 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6840 if (pf->vsi[v] &&
6841 pf->vsi[v]->veb_idx == veb->idx &&
6842 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6843 ctl_vsi = pf->vsi[v];
6844 break;
6845 }
6846 }
6847 if (!ctl_vsi) {
6848 dev_info(&pf->pdev->dev,
6849 "missing owner VSI for veb_idx %d\n", veb->idx);
6850 ret = -ENOENT;
6851 goto end_reconstitute;
6852 }
6853 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6854 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6855 ret = i40e_add_vsi(ctl_vsi);
6856 if (ret) {
6857 dev_info(&pf->pdev->dev,
6858 "rebuild of veb_idx %d owner VSI failed: %d\n",
6859 veb->idx, ret);
6860 goto end_reconstitute;
6861 }
6862 i40e_vsi_reset_stats(ctl_vsi);
6863
6864 /* create the VEB in the switch and move the VSI onto the VEB */
6865 ret = i40e_add_veb(veb, ctl_vsi);
6866 if (ret)
6867 goto end_reconstitute;
6868
6869 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6870 veb->bridge_mode = BRIDGE_MODE_VEB;
6871 else
6872 veb->bridge_mode = BRIDGE_MODE_VEPA;
6873 i40e_config_bridge_mode(veb);
6874
6875 /* create the remaining VSIs attached to this VEB */
6876 for (v = 0; v < pf->num_alloc_vsi; v++) {
6877 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6878 continue;
6879
6880 if (pf->vsi[v]->veb_idx == veb->idx) {
6881 struct i40e_vsi *vsi = pf->vsi[v];
6882
6883 vsi->uplink_seid = veb->seid;
6884 ret = i40e_add_vsi(vsi);
6885 if (ret) {
6886 dev_info(&pf->pdev->dev,
6887 "rebuild of vsi_idx %d failed: %d\n",
6888 v, ret);
6889 goto end_reconstitute;
6890 }
6891 i40e_vsi_reset_stats(vsi);
6892 }
6893 }
6894
6895 /* create any VEBs attached to this VEB - RECURSION */
6896 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6897 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6898 pf->veb[veb_idx]->uplink_seid = veb->seid;
6899 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6900 if (ret)
6901 break;
6902 }
6903 }
6904
6905end_reconstitute:
6906 return ret;
6907}
6908
6909/**
6910 * i40e_get_capabilities - get info about the HW
6911 * @pf: the PF struct
6912 **/
6913static int i40e_get_capabilities(struct i40e_pf *pf)
6914{
6915 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6916 u16 data_size;
6917 int buf_len;
6918 int err;
6919
6920 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6921 do {
6922 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6923 if (!cap_buf)
6924 return -ENOMEM;
6925
6926 /* this loads the data into the hw struct for us */
6927 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6928 &data_size,
6929 i40e_aqc_opc_list_func_capabilities,
6930 NULL);
6931 /* data loaded, buffer no longer needed */
6932 kfree(cap_buf);
6933
6934 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6935 /* retry with a larger buffer */
6936 buf_len = data_size;
6937 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6938 dev_info(&pf->pdev->dev,
6939 "capability discovery failed, err %s aq_err %s\n",
6940 i40e_stat_str(&pf->hw, err),
6941 i40e_aq_str(&pf->hw,
6942 pf->hw.aq.asq_last_status));
6943 return -ENODEV;
6944 }
6945 } while (err);
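/* Retry pattern used above: when the response does not fit, firmware
 * answers with I40E_AQ_RC_ENOMEM and reports the size it needs in
 * data_size, so the loop simply reallocates the buffer at that size and
 * issues the discover request again.
 */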
6946
6947 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6948 dev_info(&pf->pdev->dev,
6949 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6950 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6951 pf->hw.func_caps.num_msix_vectors,
6952 pf->hw.func_caps.num_msix_vectors_vf,
6953 pf->hw.func_caps.fd_filters_guaranteed,
6954 pf->hw.func_caps.fd_filters_best_effort,
6955 pf->hw.func_caps.num_tx_qp,
6956 pf->hw.func_caps.num_vsis);
6957
6958#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6959 + pf->hw.func_caps.num_vfs)
6960 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6961 dev_info(&pf->pdev->dev,
6962 "got num_vsis %d, setting num_vsis to %d\n",
6963 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6964 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6965 }
6966
6967 return 0;
6968}
6969
6970 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6971
6972 /**
6973 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6974 * @pf: board private structure
6975 **/
6976 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6977{
6978 struct i40e_vsi *vsi;
6979
6980 /* quick workaround for an NVM issue that leaves a critical register
6981 * uninitialized
6982 */
6983 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6984 static const u32 hkey[] = {
6985 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6986 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6987 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6988 0x95b3a76d};
6989 int i;
6990
6991 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6992 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6993 }
6994
6995 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6996 return;
6997
6998 /* find existing VSI and see if it needs configuring */
6999 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7000
7001 /* create a new VSI if none exists */
7002 if (!vsi) {
7003 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
7004 pf->vsi[pf->lan_vsi]->seid, 0);
7005 if (!vsi) {
7006 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
7007 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7008 return;
7009 }
7010 }
7011
7012 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
7013}
7014
7015/**
7016 * i40e_fdir_teardown - release the Flow Director resources
7017 * @pf: board private structure
7018 **/
7019static void i40e_fdir_teardown(struct i40e_pf *pf)
7020{
7021 struct i40e_vsi *vsi;
7022
7023 i40e_fdir_filter_exit(pf);
7024 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7025 if (vsi)
7026 i40e_vsi_release(vsi);
7027}
7028
7029/**
7030 * i40e_prep_for_reset - prep for the core to reset
7031 * @pf: board private structure
7032 * @lock_acquired: indicates whether or not the lock has been acquired
7033 * before this function was called.
7034 *
7035 * Close up the VFs and other things in prep for PF Reset.
7036 **/
7037 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
7038 {
7039 struct i40e_hw *hw = &pf->hw;
7040 i40e_status ret = 0;
7041 u32 v;
7042
7043 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
7044 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7045 return;
7046 if (i40e_check_asq_alive(&pf->hw))
7047 i40e_vc_notify_reset(pf);
7048
7049 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
7050
7051 /* quiesce the VSIs and their queues that are not already DOWN */
7052 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
7053 if (!lock_acquired)
7054 rtnl_lock();
7055 i40e_pf_quiesce_all_vsi(pf);
7056 if (!lock_acquired)
7057 rtnl_unlock();
7058
7059 for (v = 0; v < pf->num_alloc_vsi; v++) {
7060 if (pf->vsi[v])
7061 pf->vsi[v]->seid = 0;
7062 }
7063
7064 i40e_shutdown_adminq(&pf->hw);
7065
7066 /* call shutdown HMC */
7067 if (hw->hmc.hmc_obj) {
7068 ret = i40e_shutdown_lan_hmc(hw);
7069 if (ret)
7070 dev_warn(&pf->pdev->dev,
7071 "shutdown_lan_hmc failed: %d\n", ret);
7072 }
7073}
7074
7075/**
7076 * i40e_send_version - update firmware with driver version
7077 * @pf: PF struct
7078 */
7079static void i40e_send_version(struct i40e_pf *pf)
7080{
7081 struct i40e_driver_version dv;
7082
7083 dv.major_version = DRV_VERSION_MAJOR;
7084 dv.minor_version = DRV_VERSION_MINOR;
7085 dv.build_version = DRV_VERSION_BUILD;
7086 dv.subbuild_version = 0;
7087 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
7088 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7089}
7090
7091/**
7092 * i40e_get_oem_version - get OEM specific version information
7093 * @hw: pointer to the hardware structure
7094 **/
7095static void i40e_get_oem_version(struct i40e_hw *hw)
7096{
7097 u16 block_offset = 0xffff;
7098 u16 block_length = 0;
7099 u16 capabilities = 0;
7100 u16 gen_snap = 0;
7101 u16 release = 0;
7102
7103#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
7104#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
7105#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
7106#define I40E_NVM_OEM_GEN_OFFSET 0x02
7107#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
7108#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
7109#define I40E_NVM_OEM_LENGTH 3
7110
7111 /* Check if pointer to OEM version block is valid. */
7112 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
7113 if (block_offset == 0xffff)
7114 return;
7115
7116 /* Check if OEM version block has correct length. */
7117 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
7118 &block_length);
7119 if (block_length < I40E_NVM_OEM_LENGTH)
7120 return;
7121
7122 /* Check if OEM version format is as expected. */
7123 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
7124 &capabilities);
7125 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
7126 return;
7127
7128 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
7129 &gen_snap);
7130 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
7131 &release);
7132 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
7133 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
7134}
7135
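/* Illustrative sketch, not part of the driver: assuming
 * I40E_OEM_SNAP_SHIFT is 16, the 32-bit oem_ver packed above carries the
 * gen/snapshot word in the upper half and the release word in the lower
 * half, and could be unpacked with a hypothetical helper like this:
 */
static inline void i40e_unpack_oem_ver(u32 oem_ver, u16 *gen_snap, u16 *release)
{
	*gen_snap = oem_ver >> 16;	/* gen/snapshot word from the NVM */
	*release = oem_ver & 0xffff;	/* release word from the NVM */
}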
7136 /**
7137 * i40e_reset - wait for a core reset to finish; reset the PF if no CoreR was seen
7138 * @pf: board private structure
7139 **/
7140 static int i40e_reset(struct i40e_pf *pf)
7141 {
7142 struct i40e_hw *hw = &pf->hw;
7143 i40e_status ret;
7144
7145 ret = i40e_pf_reset(hw);
7146 if (ret) {
7147 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
7148 set_bit(__I40E_RESET_FAILED, pf->state);
7149 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7150 } else {
7151 pf->pfr_count++;
7152 }
7153 return ret;
7154}
7155
7156/**
7157 * i40e_rebuild - rebuild using a saved config
7158 * @pf: board private structure
7159 * @reinit: if the Main VSI needs to be re-initialized.
7160 * @lock_acquired: indicates whether or not the lock has been acquired
7161 * before this function was called.
7162 **/
7163static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7164{
7165 struct i40e_hw *hw = &pf->hw;
7166 u8 set_fc_aq_fail = 0;
7167 i40e_status ret;
7168 u32 val;
7169 int v;
7170
7171 if (test_bit(__I40E_DOWN, pf->state))
7172 goto clear_recovery;
7173 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7174
7175 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
7176 ret = i40e_init_adminq(&pf->hw);
7177 if (ret) {
7178 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
7179 i40e_stat_str(&pf->hw, ret),
7180 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7181 goto clear_recovery;
7182 }
7183 i40e_get_oem_version(&pf->hw);
7184
7185 /* re-verify the eeprom if we just had an EMP reset */
7186 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
7187 i40e_verify_eeprom(pf);
7188
7189 i40e_clear_pxe_mode(hw);
7190 ret = i40e_get_capabilities(pf);
7191 if (ret)
7192 goto end_core_reset;
7193
7194 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7195 hw->func_caps.num_rx_qp, 0, 0);
7196 if (ret) {
7197 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
7198 goto end_core_reset;
7199 }
7200 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7201 if (ret) {
7202 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
7203 goto end_core_reset;
7204 }
7205
7206#ifdef CONFIG_I40E_DCB
7207 ret = i40e_init_pf_dcb(pf);
7208 if (ret) {
7209 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
7210 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7211 /* Continue without DCB enabled */
7212 }
7213#endif /* CONFIG_I40E_DCB */
7214 /* do basic switch setup */
7215 if (!lock_acquired)
7216 rtnl_lock();
7217 ret = i40e_setup_pf_switch(pf, reinit);
7218 if (ret)
7219 goto end_unlock;
7220
7221 /* The driver only wants link up/down and module qualification
7222 * reports from firmware. Note the negative logic.
7223 */
7224 ret = i40e_aq_set_phy_int_mask(&pf->hw,
7225 ~(I40E_AQ_EVENT_LINK_UPDOWN |
7226 I40E_AQ_EVENT_MEDIA_NA |
7227 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
7228 if (ret)
7229 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
7230 i40e_stat_str(&pf->hw, ret),
7231 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7232
7233 /* make sure our flow control settings are restored */
7234 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7235 if (ret)
7236 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7237 i40e_stat_str(&pf->hw, ret),
7238 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7239
7240 /* Rebuild the VSIs and VEBs that existed before reset.
7241 * They are still in our local switch element arrays, so only
7242 * need to rebuild the switch model in the HW.
7243 *
7244 * If there were VEBs but the reconstitution failed, we'll
7245 * try to recover minimal use by getting the basic PF VSI working.
7246 */
7247 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
7248 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
7249 /* find the one VEB connected to the MAC, and find orphans */
7250 for (v = 0; v < I40E_MAX_VEB; v++) {
7251 if (!pf->veb[v])
7252 continue;
7253
7254 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
7255 pf->veb[v]->uplink_seid == 0) {
7256 ret = i40e_reconstitute_veb(pf->veb[v]);
7257
7258 if (!ret)
7259 continue;
7260
7261 /* If Main VEB failed, we're in deep doodoo,
7262 * so give up rebuilding the switch and set up
7263 * for minimal rebuild of PF VSI.
7264 * If orphan failed, we'll report the error
7265 * but try to keep going.
7266 */
7267 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
7268 dev_info(&pf->pdev->dev,
7269 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
7270 ret);
7271 pf->vsi[pf->lan_vsi]->uplink_seid
7272 = pf->mac_seid;
7273 break;
7274 } else if (pf->veb[v]->uplink_seid == 0) {
7275 dev_info(&pf->pdev->dev,
7276 "rebuild of orphan VEB failed: %d\n",
7277 ret);
7278 }
7279 }
7280 }
7281 }
7282
7283 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
7284 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
7285 /* no VEB, so rebuild only the Main VSI */
7286 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
7287 if (ret) {
7288 dev_info(&pf->pdev->dev,
7289 "rebuild of Main VSI failed: %d\n", ret);
7290 goto end_unlock;
7291 }
7292 }
7293
7294 /* Reconfigure hardware for allowing smaller MSS in the case
7295 * of TSO, so that we avoid the MDD being fired and causing
7296 * a reset in the case of small MSS+TSO.
7297 */
7298#define I40E_REG_MSS 0x000E64DC
7299#define I40E_REG_MSS_MIN_MASK 0x3FF0000
7300#define I40E_64BYTE_MSS 0x400000
7301 val = rd32(hw, I40E_REG_MSS);
7302 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
7303 val &= ~I40E_REG_MSS_MIN_MASK;
7304 val |= I40E_64BYTE_MSS;
7305 wr32(hw, I40E_REG_MSS, val);
7306 }
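/* Worked example of the register math above: the minimum-MSS field
 * occupies bits 25:16 (mask 0x3FF0000), so I40E_64BYTE_MSS is
 * 64 << 16 == 0x400000, i.e. a 64-byte floor once shifted into place.
 */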
7307
7308 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
7309 msleep(75);
7310 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
7311 if (ret)
7312 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
7313 i40e_stat_str(&pf->hw, ret),
7314 i40e_aq_str(&pf->hw,
7315 pf->hw.aq.asq_last_status));
7316 }
7317 /* reinit the misc interrupt */
7318 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7319 ret = i40e_setup_misc_vector(pf);
7320
7321 /* Add a filter to drop all Flow control frames from any VSI from being
7322 * transmitted. By doing so we stop a malicious VF from sending out
7323 * PAUSE or PFC frames and potentially controlling traffic for other
7324 * PF/VF VSIs.
7325 * The FW can still send Flow control frames if enabled.
7326 */
7327 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
7328 pf->main_vsi_seid);
7329
7330 /* restart the VSIs that were rebuilt and running before the reset */
7331 i40e_pf_unquiesce_all_vsi(pf);
7332
7333 /* Release the RTNL lock before we start resetting VFs */
7334 if (!lock_acquired)
7335 rtnl_unlock();
7336
7337 i40e_reset_all_vfs(pf, true);
7338
7339 /* tell the firmware that we're starting */
7340 i40e_send_version(pf);
7341
7342 /* We've already released the lock, so don't do it again */
7343 goto end_core_reset;
7344
7345 end_unlock:
7346 if (!lock_acquired)
7347 rtnl_unlock();
7348 end_core_reset:
7349 clear_bit(__I40E_RESET_FAILED, pf->state);
7350 clear_recovery:
7351 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7352}
7353
7354/**
7355 * i40e_reset_and_rebuild - reset and rebuild using a saved config
7356 * @pf: board private structure
7357 * @reinit: if the Main VSI needs to be re-initialized.
7358 * @lock_acquired: indicates whether or not the lock has been acquired
7359 * before this function was called.
7360 **/
7361static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7362 bool lock_acquired)
7363{
7364 int ret;
7365 /* Now we wait for GRST to settle out.
7366 * We don't have to delete the VEBs or VSIs from the hw switch
7367 * because the reset will make them disappear.
7368 */
7369 ret = i40e_reset(pf);
7370 if (!ret)
7371 i40e_rebuild(pf, reinit, lock_acquired);
7372}
7373
7374 /**
7375 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
7376 * @pf: board private structure
7377 *
7378 * Close up the VFs and other things in prep for a Core Reset,
7379 * then get ready to rebuild the world.
7380 * @lock_acquired: indicates whether or not the lock has been acquired
7381 * before this function was called.
7382 **/
7383 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
7384 {
7385 i40e_prep_for_reset(pf, lock_acquired);
7386 i40e_reset_and_rebuild(pf, false, lock_acquired);
7387}
7388
7389/**
7390 * i40e_handle_mdd_event
7391 * @pf: pointer to the PF structure
7392 *
7393 * Called from the MDD irq handler to identify possibly malicious vfs
7394 **/
7395static void i40e_handle_mdd_event(struct i40e_pf *pf)
7396{
7397 struct i40e_hw *hw = &pf->hw;
7398 bool mdd_detected = false;
7399 bool pf_mdd_detected = false;
7400 struct i40e_vf *vf;
7401 u32 reg;
7402 int i;
7403
7404 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
7405 return;
7406
7407 /* find what triggered the MDD event */
7408 reg = rd32(hw, I40E_GL_MDET_TX);
7409 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7410 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7411 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7412 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7413 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7414 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7415 I40E_GL_MDET_TX_EVENT_SHIFT;
7416 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7417 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7418 pf->hw.func_caps.base_queue;
7419 if (netif_msg_tx_err(pf))
7420 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7421 event, queue, pf_num, vf_num);
7422 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7423 mdd_detected = true;
7424 }
7425 reg = rd32(hw, I40E_GL_MDET_RX);
7426 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7427 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7428 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7429 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7430 I40E_GL_MDET_RX_EVENT_SHIFT;
7431 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7432 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7433 pf->hw.func_caps.base_queue;
7434 if (netif_msg_rx_err(pf))
7435 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7436 event, queue, func);
7437 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7438 mdd_detected = true;
7439 }
7440
7441 if (mdd_detected) {
7442 reg = rd32(hw, I40E_PF_MDET_TX);
7443 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7444 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7445 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7446 pf_mdd_detected = true;
7447 }
7448 reg = rd32(hw, I40E_PF_MDET_RX);
7449 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7450 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7451 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7452 pf_mdd_detected = true;
7453 }
7454 /* Queue belongs to the PF, initiate a reset */
7455 if (pf_mdd_detected) {
7456 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7457 i40e_service_event_schedule(pf);
7458 }
7459 }
7460
7461 /* see if one of the VFs needs its hand slapped */
7462 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7463 vf = &(pf->vf[i]);
7464 reg = rd32(hw, I40E_VP_MDET_TX(i));
7465 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7466 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7467 vf->num_mdd_events++;
7468 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7469 i);
7470 }
7471
7472 reg = rd32(hw, I40E_VP_MDET_RX(i));
7473 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7474 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7475 vf->num_mdd_events++;
7476 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7477 i);
7478 }
7479
7480 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7481 dev_info(&pf->pdev->dev,
7482 "Too many MDD events on VF %d, disabled\n", i);
7483 dev_info(&pf->pdev->dev,
7484 "Use PF Control I/F to re-enable the VF\n");
7485 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
7486 }
7487 }
7488
7489 /* re-enable mdd interrupt cause */
7490 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
7491 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7492 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7493 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7494 i40e_flush(hw);
7495}
7496
7497static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
7498{
7499 switch (port->type) {
7500 case UDP_TUNNEL_TYPE_VXLAN:
7501 return "vxlan";
7502 case UDP_TUNNEL_TYPE_GENEVE:
7503 return "geneve";
7504 default:
7505 return "unknown";
7506 }
7507}
7508
7509/**
7510 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
7511 * @pf: board private structure
7512 **/
7513static void i40e_sync_udp_filters(struct i40e_pf *pf)
7514{
7515 int i;
7516
7517 /* loop through and set pending bit for all active UDP filters */
7518 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7519 if (pf->udp_ports[i].port)
7520 pf->pending_udp_bitmap |= BIT_ULL(i);
7521 }
7522
7523 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
7524}
7525
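/* Note on the bitmap above (inferred from its use with BIT_ULL()):
 * pending_udp_bitmap keeps one bit per UDP port slot; a set bit means
 * slot i has an add or delete that the subtask below still needs to
 * push to the firmware.
 */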
7526 /**
7527 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7528 * @pf: board private structure
7529 **/
7530 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7531 {
7532 struct i40e_hw *hw = &pf->hw;
7533 i40e_status ret;
7534 u16 port;
7535 int i;
7536
7537 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7538 return;
7539
7540 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7541
7542 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7543 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7544 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7545 port = pf->udp_ports[i].port;
7546 if (port)
7547 ret = i40e_aq_add_udp_tunnel(hw, port,
7548 pf->udp_ports[i].type,
7549 NULL, NULL);
7550 else
7551 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7552
7553 if (ret) {
7554 dev_info(&pf->pdev->dev,
7555 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7556 i40e_tunnel_name(&pf->udp_ports[i]),
7557 port ? "add" : "delete",
7558 port, i,
7559 i40e_stat_str(&pf->hw, ret),
7560 i40e_aq_str(&pf->hw,
7561 pf->hw.aq.asq_last_status));
7562 pf->udp_ports[i].port = 0;
7563 }
7564 }
7565 }
7566}
7567
7568/**
7569 * i40e_service_task - Run the driver's async subtasks
7570 * @work: pointer to work_struct containing our data
7571 **/
7572static void i40e_service_task(struct work_struct *work)
7573{
7574 struct i40e_pf *pf = container_of(work,
7575 struct i40e_pf,
7576 service_task);
7577 unsigned long start_time = jiffies;
7578
7579 /* don't bother with service tasks if a reset is in progress */
7580 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7581 return;
7582
7583 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
7584 return;
7585
7586 i40e_detect_recover_hung(pf);
7587 i40e_sync_filters_subtask(pf);
7588 i40e_reset_subtask(pf);
7589 i40e_handle_mdd_event(pf);
7590 i40e_vc_process_vflr_event(pf);
7591 i40e_watchdog_subtask(pf);
7592 i40e_fdir_reinit_subtask(pf);
7593 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
7594 /* Client subtask will reopen next time through. */
7595 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7596 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7597 } else {
7598 i40e_client_subtask(pf);
7599 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7600 i40e_notify_client_of_l2_param_changes(
7601 pf->vsi[pf->lan_vsi]);
7602 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7603 }
7604 }
7605 i40e_sync_filters_subtask(pf);
7606 i40e_sync_udp_filters_subtask(pf);
7607 i40e_clean_adminq_subtask(pf);
7608
7609 /* flush memory to make sure state is correct before next watchdog */
7610 smp_mb__before_atomic();
7611 clear_bit(__I40E_SERVICE_SCHED, pf->state);
7612
7613 /* If the tasks have taken longer than one timer cycle or there
7614 * is more work to be done, reschedule the service task now
7615 * rather than wait for the timer to tick again.
7616 */
7617 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7618 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
7619 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
7620 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
7621 i40e_service_event_schedule(pf);
7622}
7623
7624/**
7625 * i40e_service_timer - timer callback
7626 * @data: pointer to PF struct
7627 **/
7628static void i40e_service_timer(unsigned long data)
7629{
7630 struct i40e_pf *pf = (struct i40e_pf *)data;
7631
7632 mod_timer(&pf->service_timer,
7633 round_jiffies(jiffies + pf->service_timer_period));
7634 i40e_service_event_schedule(pf);
7635}
7636
7637/**
7638 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7639 * @vsi: the VSI being configured
7640 **/
7641static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7642{
7643 struct i40e_pf *pf = vsi->back;
7644
7645 switch (vsi->type) {
7646 case I40E_VSI_MAIN:
7647 vsi->alloc_queue_pairs = pf->num_lan_qps;
7648 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7649 I40E_REQ_DESCRIPTOR_MULTIPLE);
7650 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7651 vsi->num_q_vectors = pf->num_lan_msix;
7652 else
7653 vsi->num_q_vectors = 1;
7654
7655 break;
7656
7657 case I40E_VSI_FDIR:
7658 vsi->alloc_queue_pairs = 1;
7659 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7660 I40E_REQ_DESCRIPTOR_MULTIPLE);
7661 vsi->num_q_vectors = pf->num_fdsb_msix;
7662 break;
7663
7664 case I40E_VSI_VMDQ2:
7665 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7666 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7667 I40E_REQ_DESCRIPTOR_MULTIPLE);
7668 vsi->num_q_vectors = pf->num_vmdq_msix;
7669 break;
7670
7671 case I40E_VSI_SRIOV:
7672 vsi->alloc_queue_pairs = pf->num_vf_qps;
7673 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7674 I40E_REQ_DESCRIPTOR_MULTIPLE);
7675 break;
7676
7677 default:
7678 WARN_ON(1);
7679 return -ENODATA;
7680 }
7681
7682 return 0;
7683}
7684
7685 /**
7686 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7687 * @vsi: pointer to the VSI
7688 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7689 *
7690 * On error: returns error code (negative)
7691 * On success: returns 0
7692 **/
7693 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7694 {
7695 struct i40e_ring **next_rings;
7696 int size;
7697 int ret = 0;
7698
7699 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
7700 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
7701 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
7702 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7703 if (!vsi->tx_rings)
7704 return -ENOMEM;
7705 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
7706 if (i40e_enabled_xdp_vsi(vsi)) {
7707 vsi->xdp_rings = next_rings;
7708 next_rings += vsi->alloc_queue_pairs;
7709 }
7710 vsi->rx_rings = next_rings;
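/* Resulting layout of the single allocation, for N = alloc_queue_pairs:
 * slots [0..N-1] hold the Tx ring pointers, then (when XDP is enabled)
 * [N..2N-1] hold the XDP Tx ring pointers, and the final N slots hold
 * the Rx ring pointers.
 */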
7711
7712 if (alloc_qvectors) {
7713 /* allocate memory for q_vector pointers */
7714 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7715 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7716 if (!vsi->q_vectors) {
7717 ret = -ENOMEM;
7718 goto err_vectors;
7719 }
7720 }
7721 return ret;
7722
7723err_vectors:
7724 kfree(vsi->tx_rings);
7725 return ret;
7726}
7727
7728/**
7729 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7730 * @pf: board private structure
7731 * @type: type of VSI
7732 *
7733 * On error: returns error code (negative)
7734 * On success: returns vsi index in PF (positive)
7735 **/
7736static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7737{
7738 int ret = -ENODEV;
7739 struct i40e_vsi *vsi;
7740 int vsi_idx;
7741 int i;
7742
7743 /* Need to protect the allocation of the VSIs at the PF level */
7744 mutex_lock(&pf->switch_mutex);
7745
7746 /* VSI list may be fragmented if VSI creation/destruction has
7747 * been happening. We can afford to do a quick scan to look
7748 * for any free VSIs in the list.
7749 *
7750 * find next empty vsi slot, looping back around if necessary
7751 */
7752 i = pf->next_vsi;
7753 while (i < pf->num_alloc_vsi && pf->vsi[i])
7754 i++;
7755 if (i >= pf->num_alloc_vsi) {
7756 i = 0;
7757 while (i < pf->next_vsi && pf->vsi[i])
7758 i++;
7759 }
7760
7761 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7762 vsi_idx = i; /* Found one! */
7763 } else {
7764 ret = -ENODEV;
7765 goto unlock_pf; /* out of VSI slots! */
7766 }
7767 pf->next_vsi = ++i;
7768
7769 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7770 if (!vsi) {
7771 ret = -ENOMEM;
7772 goto unlock_pf;
7773 }
7774 vsi->type = type;
7775 vsi->back = pf;
7776 set_bit(__I40E_VSI_DOWN, vsi->state);
7777 vsi->flags = 0;
7778 vsi->idx = vsi_idx;
7779 vsi->int_rate_limit = 0;
7780 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7781 pf->rss_table_size : 64;
7782 vsi->netdev_registered = false;
7783 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7784 hash_init(vsi->mac_filter_hash);
7785 vsi->irqs_ready = false;
7786
7787 ret = i40e_set_num_rings_in_vsi(vsi);
7788 if (ret)
7789 goto err_rings;
7790
7791 ret = i40e_vsi_alloc_arrays(vsi, true);
7792 if (ret)
7793 goto err_rings;
7794
7795 /* Setup default MSIX irq handler for VSI */
7796 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7797
7798 /* Initialize VSI lock */
7799 spin_lock_init(&vsi->mac_filter_hash_lock);
7800 pf->vsi[vsi_idx] = vsi;
7801 ret = vsi_idx;
7802 goto unlock_pf;
7803
7804 err_rings:
7805 pf->next_vsi = i - 1;
7806 kfree(vsi);
7807unlock_pf:
7808 mutex_unlock(&pf->switch_mutex);
7809 return ret;
7810}
7811
7812 /**
7813 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7814 * @vsi: pointer to the VSI
7815 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7816 *
7817 * There is no return value; the ring pointer array and, optionally,
7818 * the q_vector pointer array are simply freed.
7819 **/
7820 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7821{
7822 /* free the ring and vector containers */
7823 if (free_qvectors) {
7824 kfree(vsi->q_vectors);
7825 vsi->q_vectors = NULL;
7826 }
7827 kfree(vsi->tx_rings);
7828 vsi->tx_rings = NULL;
7829 vsi->rx_rings = NULL;
7830 vsi->xdp_rings = NULL;
7831}
7832
7833/**
7834 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7835 * and lookup table
7836 * @vsi: Pointer to VSI structure
7837 */
7838static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7839{
7840 if (!vsi)
7841 return;
7842
7843 kfree(vsi->rss_hkey_user);
7844 vsi->rss_hkey_user = NULL;
7845
7846 kfree(vsi->rss_lut_user);
7847 vsi->rss_lut_user = NULL;
7848}
7849
7850/**
7851 * i40e_vsi_clear - Deallocate the VSI provided
7852 * @vsi: the VSI being un-configured
7853 **/
7854static int i40e_vsi_clear(struct i40e_vsi *vsi)
7855{
7856 struct i40e_pf *pf;
7857
7858 if (!vsi)
7859 return 0;
7860
7861 if (!vsi->back)
7862 goto free_vsi;
7863 pf = vsi->back;
7864
7865 mutex_lock(&pf->switch_mutex);
7866 if (!pf->vsi[vsi->idx]) {
7867 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7868 vsi->idx, vsi->idx, vsi, vsi->type);
7869 goto unlock_vsi;
7870 }
7871
7872 if (pf->vsi[vsi->idx] != vsi) {
7873 dev_err(&pf->pdev->dev,
7874 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7875 pf->vsi[vsi->idx]->idx,
7876 pf->vsi[vsi->idx],
7877 pf->vsi[vsi->idx]->type,
7878 vsi->idx, vsi, vsi->type);
7879 goto unlock_vsi;
7880 }
7881
7882 /* updates the PF for this cleared vsi */
7883 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7884 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7885
7886 i40e_vsi_free_arrays(vsi, true);
7887 i40e_clear_rss_config_user(vsi);
7888
7889 pf->vsi[vsi->idx] = NULL;
7890 if (vsi->idx < pf->next_vsi)
7891 pf->next_vsi = vsi->idx;
7892
7893unlock_vsi:
7894 mutex_unlock(&pf->switch_mutex);
7895free_vsi:
7896 kfree(vsi);
7897
7898 return 0;
7899}
7900
7901/**
7902 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7903 * @vsi: the VSI being cleaned
7904 **/
7905 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7906{
7907 int i;
7908
7909 if (vsi->tx_rings && vsi->tx_rings[0]) {
7910 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7911 kfree_rcu(vsi->tx_rings[i], rcu);
7912 vsi->tx_rings[i] = NULL;
7913 vsi->rx_rings[i] = NULL;
7914 if (vsi->xdp_rings)
7915 vsi->xdp_rings[i] = NULL;
7916 }
7917 }
7918}
7919
7920/**
7921 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7922 * @vsi: the VSI being configured
7923 **/
7924static int i40e_alloc_rings(struct i40e_vsi *vsi)
7925{
7926 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
7927 struct i40e_pf *pf = vsi->back;
7928 struct i40e_ring *ring;
7929
7930 /* Set basic values in the rings to be used later during open() */
7931 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7932 /* allocate space for both Tx and Rx in one shot */
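/* Each iteration allocates qpv rings back to back: ring[0] is the Tx
 * ring, ring[1] is the XDP Tx ring when XDP is enabled, and the last
 * entry is the Rx ring; the ring++ walks below rely on this adjacency.
 */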
7933 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
7934 if (!ring)
7935 goto err_out;
7936
7937 ring->queue_index = i;
7938 ring->reg_idx = vsi->base_queue + i;
7939 ring->ring_active = false;
7940 ring->vsi = vsi;
7941 ring->netdev = vsi->netdev;
7942 ring->dev = &pf->pdev->dev;
7943 ring->count = vsi->num_desc;
7944 ring->size = 0;
7945 ring->dcb_tc = 0;
7946 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7947 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7948 ring->tx_itr_setting = pf->tx_itr_default;
7949 vsi->tx_rings[i] = ring++;
7950
7951 if (!i40e_enabled_xdp_vsi(vsi))
7952 goto setup_rx;
7953
7954 ring->queue_index = vsi->alloc_queue_pairs + i;
7955 ring->reg_idx = vsi->base_queue + ring->queue_index;
7956 ring->ring_active = false;
7957 ring->vsi = vsi;
7958 ring->netdev = NULL;
7959 ring->dev = &pf->pdev->dev;
7960 ring->count = vsi->num_desc;
7961 ring->size = 0;
7962 ring->dcb_tc = 0;
7963 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7964 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7965 set_ring_xdp(ring);
7966 ring->tx_itr_setting = pf->tx_itr_default;
7967 vsi->xdp_rings[i] = ring++;
7968
7969setup_rx:
7970 ring->queue_index = i;
7971 ring->reg_idx = vsi->base_queue + i;
7972 ring->ring_active = false;
7973 ring->vsi = vsi;
7974 ring->netdev = vsi->netdev;
7975 ring->dev = &pf->pdev->dev;
7976 ring->count = vsi->num_desc;
7977 ring->size = 0;
7978 ring->dcb_tc = 0;
7979 ring->rx_itr_setting = pf->rx_itr_default;
7980 vsi->rx_rings[i] = ring;
7981 }
7982
7983 return 0;
7984
7985err_out:
7986 i40e_vsi_clear_rings(vsi);
7987 return -ENOMEM;
7988}
7989
7990/**
7991 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7992 * @pf: board private structure
7993 * @vectors: the number of MSI-X vectors to request
7994 *
7995 * Returns the number of vectors reserved, or error
7996 **/
7997static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7998{
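	/* pci_enable_msix_range() either reserves between I40E_MIN_MSIX and
	 * 'vectors' entries and returns how many it actually got, or
	 * returns a negative errno; it never returns a count below the
	 * requested minimum.
	 */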
7999 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
8000 I40E_MIN_MSIX, vectors);
8001 if (vectors < 0) {
8002 dev_info(&pf->pdev->dev,
8003 "MSI-X vector reservation failed: %d\n", vectors);
8004 vectors = 0;
8005 }
8006
8007 return vectors;
8008}
8009
8010/**
8011 * i40e_init_msix - Setup the MSIX capability
8012 * @pf: board private structure
8013 *
8014 * Work with the OS to set up the MSIX vectors needed.
8015 *
8016 * Returns the number of vectors reserved or negative on failure
8017 **/
8018static int i40e_init_msix(struct i40e_pf *pf)
8019{
8020 struct i40e_hw *hw = &pf->hw;
8021 int cpus, extra_vectors;
8022 int vectors_left;
8023 int v_budget, i;
8024 int v_actual;
8025 int iwarp_requested = 0;
8026
8027 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8028 return -ENODEV;
8029
8030 /* The number of vectors we'll request will be comprised of:
8031 * - Add 1 for "other" cause for Admin Queue events, etc.
8032 * - The number of LAN queue pairs
8033 * - Queues being used for RSS.
8034 * We don't need as many as max_rss_size vectors.
8035 * use rss_size instead in the calculation since that
8036 * is governed by number of cpus in the system.
8037 * - assumes symmetric Tx/Rx pairing
8038 * - The number of VMDq pairs
8039 * - The CPU count within the NUMA node if iWARP is enabled
8040 * Once we count this up, try the request.
8041 *
8042 * If we can't get what we want, we'll simplify to nearly nothing
8043 * and try again. If that still fails, we punt.
8044 */
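/* Worked example (illustrative, not from the source): on a box with 16
 * online CPUs and 64 function vectors, the misc/"other" cause takes 1,
 * LAN initially gets min(16, 63 / 2) == 16, sideband flow director takes
 * 1, and iWARP/VMDq bid for what is left; any remainder is then handed
 * back to LAN, capped at num_online_cpus().
 */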
8045 vectors_left = hw->func_caps.num_msix_vectors;
8046 v_budget = 0;
8047
8048 /* reserve one vector for miscellaneous handler */
8049 if (vectors_left) {
8050 v_budget++;
8051 vectors_left--;
8052 }
8053
c0cf70a6
JK
8054 /* reserve some vectors for the main PF traffic queues. Initially we
8055 * only reserve at most 50% of the available vectors, in the case that
8056 * the number of online CPUs is large. This ensures that we can enable
8057 * extra features as well. Once we've enabled the other features, we
8058 * will use any remaining vectors to reach as close as we can to the
8059 * number of online CPUs.
8060 */
8061 cpus = num_online_cpus();
8062 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
1e200e4a 8063 vectors_left -= pf->num_lan_msix;
1e200e4a
SN
8064
8065 /* reserve one vector for sideband flow director */
8066 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8067 if (vectors_left) {
8068 pf->num_fdsb_msix = 1;
8069 v_budget++;
8070 vectors_left--;
8071 } else {
8072 pf->num_fdsb_msix = 0;
8073 }
8074 }
8075
8076 /* can we reserve enough for iWARP? */
8077 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8078 iwarp_requested = pf->num_iwarp_msix;
8079
8080 if (!vectors_left)
8081 pf->num_iwarp_msix = 0;
8082 else if (vectors_left < pf->num_iwarp_msix)
8083 pf->num_iwarp_msix = 1;
8084 v_budget += pf->num_iwarp_msix;
8085 vectors_left -= pf->num_iwarp_msix;
8086 }
8087
8088 /* any vectors left over go for VMDq support */
8089 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
8090 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
8091 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
8092
8093 if (!vectors_left) {
8094 pf->num_vmdq_msix = 0;
8095 pf->num_vmdq_qps = 0;
8096 } else {
8097 /* if we're short on vectors for what's desired, we limit
8098 * the queues per vmdq. If this is still more than are
8099 * available, the user will need to change the number of
8100 * queues/vectors used by the PF later with the ethtool
8101 * channels command
8102 */
8103 if (vmdq_vecs < vmdq_vecs_wanted)
8104 pf->num_vmdq_qps = 1;
8105 pf->num_vmdq_msix = pf->num_vmdq_qps;
8106
8107 v_budget += vmdq_vecs;
8108 vectors_left -= vmdq_vecs;
8109 }
8110 }
8111
8112 /* On systems with a large number of SMP cores, we previously limited
8113 * the number of vectors for num_lan_msix to be at most 50% of the
8114 * available vectors, to allow for other features. Now, we add back
8115 * the remaining vectors. However, we ensure that the total
8116 * num_lan_msix will not exceed num_online_cpus(). To do this, we
8117 * calculate the number of vectors we can add without going over the
8118 * cap of CPUs. For systems with a small number of CPUs this will be
8119 * zero.
8120 */
8121 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
8122 pf->num_lan_msix += extra_vectors;
8123 vectors_left -= extra_vectors;
8124
8125 WARN(vectors_left < 0,
8126 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
8127
8128 v_budget += pf->num_lan_msix;
8129 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
8130 GFP_KERNEL);
8131 if (!pf->msix_entries)
8132 return -ENOMEM;
8133
8134 for (i = 0; i < v_budget; i++)
8135 pf->msix_entries[i].entry = i;
8136 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
8137
8138 if (v_actual < I40E_MIN_MSIX) {
8139 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
8140 kfree(pf->msix_entries);
8141 pf->msix_entries = NULL;
8142 pci_disable_msix(pf->pdev);
8143 return -ENODEV;
8144
8145 } else if (v_actual == I40E_MIN_MSIX) {
8146 /* Adjust for minimal MSIX use */
8147 pf->num_vmdq_vsis = 0;
8148 pf->num_vmdq_qps = 0;
41c445ff
JB
8149 pf->num_lan_qps = 1;
8150 pf->num_lan_msix = 1;
8151
8152 } else if (!vectors_left) {
8153 /* If we have limited resources, we will start with no vectors
8154 * for the special features and then allocate vectors to some
8155 * of these features based on the policy and at the end disable
8156 * the features that did not get any vectors.
8157 */
8158 int vec;
8159
8160 dev_info(&pf->pdev->dev,
8161 "MSI-X vector limit reached, attempting to redistribute vectors\n");
8162 /* reserve the misc vector */
8163 vec = v_actual - 1;
8164
8165 /* Scale vector usage down */
8166 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
8167 pf->num_vmdq_vsis = 1;
8168 pf->num_vmdq_qps = 1;
8169
8170 /* partition out the remaining vectors */
8171 switch (vec) {
8172 case 2:
41c445ff
JB
8173 pf->num_lan_msix = 1;
8174 break;
8175 case 3:
e3219ce6
ASJ
8176 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8177 pf->num_lan_msix = 1;
8178 pf->num_iwarp_msix = 1;
8179 } else {
8180 pf->num_lan_msix = 2;
8181 }
8182 break;
8183 default:
8184 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8185 pf->num_iwarp_msix = min_t(int, (vec / 3),
8186 iwarp_requested);
8187 pf->num_vmdq_vsis = min_t(int, (vec / 3),
8188 I40E_DEFAULT_NUM_VMDQ_VSI);
8189 } else {
8190 pf->num_vmdq_vsis = min_t(int, (vec / 2),
8191 I40E_DEFAULT_NUM_VMDQ_VSI);
8192 }
8193 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8194 pf->num_fdsb_msix = 1;
8195 vec--;
8196 }
8197 pf->num_lan_msix = min_t(int,
8198 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
8199 pf->num_lan_msix);
8200 pf->num_lan_qps = pf->num_lan_msix;
8201 break;
8202 }
8203 }
8204
8205 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8206 (pf->num_fdsb_msix == 0)) {
8207 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
8208 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8209 }
8210 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8211 (pf->num_vmdq_msix == 0)) {
8212 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
8213 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
8214 }
8215
8216 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
8217 (pf->num_iwarp_msix == 0)) {
8218 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
8219 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
8220 }
8221 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
8222 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
8223 pf->num_lan_msix,
8224 pf->num_vmdq_msix * pf->num_vmdq_vsis,
8225 pf->num_fdsb_msix,
8226 pf->num_iwarp_msix);
8227
8228 return v_actual;
8229}
8230
8231 /**
8232 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
8233 * @vsi: the VSI being configured
8234 * @v_idx: index of the vector in the vsi struct
8235 * @cpu: cpu to be used on affinity_mask
8236 *
8237 * We allocate one q_vector. If allocation fails we return -ENOMEM.
8238 **/
8239 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
8240{
8241 struct i40e_q_vector *q_vector;
8242
8243 /* allocate q_vector */
8244 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
8245 if (!q_vector)
8246 return -ENOMEM;
8247
8248 q_vector->vsi = vsi;
8249 q_vector->v_idx = v_idx;
8250 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
8251
8252 if (vsi->netdev)
8253 netif_napi_add(vsi->netdev, &q_vector->napi,
8254 i40e_napi_poll, NAPI_POLL_WEIGHT);
8255
8256 q_vector->rx.latency_range = I40E_LOW_LATENCY;
8257 q_vector->tx.latency_range = I40E_LOW_LATENCY;
8258
8259 /* tie q_vector and vsi together */
8260 vsi->q_vectors[v_idx] = q_vector;
8261
8262 return 0;
8263}
8264
8265 /**
8266 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
8267 * @vsi: the VSI being configured
8268 *
8269 * We allocate one q_vector per queue interrupt. If allocation fails we
8270 * return -ENOMEM.
8271 **/
8272 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
8273{
8274 struct i40e_pf *pf = vsi->back;
8275 int err, v_idx, num_q_vectors, current_cpu;
8276
8277 /* if not MSIX, give the one vector only to the LAN VSI */
8278 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
8279 num_q_vectors = vsi->num_q_vectors;
8280 else if (vsi == pf->vsi[pf->lan_vsi])
8281 num_q_vectors = 1;
8282 else
8283 return -EINVAL;
8284
8285 current_cpu = cpumask_first(cpu_online_mask);
8286
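/* Note on the wrap logic below: cpumask_next() returns a value >=
 * nr_cpu_ids once the online mask is exhausted, at which point the loop
 * wraps back to cpumask_first() so the walk over CPUs stays round-robin.
 */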
41c445ff 8287 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
8288 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
8289 if (err)
8290 goto err_out;
8291 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8292 if (unlikely(current_cpu >= nr_cpu_ids))
8293 current_cpu = cpumask_first(cpu_online_mask);
8294 }
8295
8296 return 0;
8297
8298err_out:
8299 while (v_idx--)
8300 i40e_free_q_vector(vsi, v_idx);
8301
8302 return err;
41c445ff
JB
8303}
8304
8305/**
8306 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
8307 * @pf: board private structure to initialize
8308 **/
8309 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
8310 {
8311 int vectors = 0;
8312 ssize_t size;
41c445ff
JB
8313
8314 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3b444399
SN
8315 vectors = i40e_init_msix(pf);
8316 if (vectors < 0) {
60ea5f83 8317 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
e3219ce6 8318 I40E_FLAG_IWARP_ENABLED |
60ea5f83 8319 I40E_FLAG_RSS_ENABLED |
4d9b6043 8320 I40E_FLAG_DCB_CAPABLE |
a036244c 8321 I40E_FLAG_DCB_ENABLED |
60ea5f83
JB
8322 I40E_FLAG_SRIOV_ENABLED |
8323 I40E_FLAG_FD_SB_ENABLED |
8324 I40E_FLAG_FD_ATR_ENABLED |
8325 I40E_FLAG_VMDQ_ENABLED);
41c445ff
JB
8326
8327 /* rework the queue expectations without MSIX */
8328 i40e_determine_queue_usage(pf);
8329 }
8330 }
8331
8332 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8333 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
77fa28be 8334 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
3b444399
SN
8335 vectors = pci_enable_msi(pf->pdev);
8336 if (vectors < 0) {
8337 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
8338 vectors);
41c445ff
JB
8339 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
8340 }
3b444399 8341 vectors = 1; /* one MSI or Legacy vector */
41c445ff
JB
8342 }
8343
958a3e3b 8344 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
77fa28be 8345 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
958a3e3b 8346
3b444399
SN
8347 /* set up vector assignment tracking */
8348 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
8349 pf->irq_pile = kzalloc(size, GFP_KERNEL);
c1147280
JB
8350 if (!pf->irq_pile) {
8351 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
8352 return -ENOMEM;
8353 }
3b444399
SN
8354 pf->irq_pile->num_entries = vectors;
8355 pf->irq_pile->search_hint = 0;
8356
c1147280 8357 /* track first vector for misc interrupts, ignore return */
3b444399 8358 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
c1147280
JB
8359
8360 return 0;
41c445ff
JB
8361}
8362
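/* Editor's illustration (not part of the driver): the interrupt
 * fallback ladder above in miniature - try MSI-X, then MSI, then
 * settle for legacy INTx. A sketch only; vector accounting, queue
 * reworking, and the irq_pile setup are omitted, and the helper name
 * is hypothetical.
 */
static int example_irq_ladder(struct i40e_pf *pf)
{
	if ((pf->flags & I40E_FLAG_MSIX_ENABLED) && i40e_init_msix(pf) > 0)
		return 0;			/* MSI-X granted */
	pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
	if ((pf->flags & I40E_FLAG_MSI_ENABLED) && !pci_enable_msi(pf->pdev))
		return 0;			/* one MSI vector */
	pf->flags &= ~I40E_FLAG_MSI_ENABLED;
	return 0;				/* legacy INTx remains */
}
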
#ifdef CONFIG_PM
/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 */
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
#endif /* CONFIG_PM */

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}

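/* Editor's illustration (not part of the driver): the
 * test_and_set_bit() guard used above makes the IRQ request
 * idempotent - only the first caller performs it, and the bit is
 * rolled back on failure so a later call can retry. A generic sketch
 * assuming kernel bitops; the helper and callback are hypothetical.
 */
static int example_do_once(unsigned long *state, int bit,
			   int (*try)(void *ctx), void *ctx)
{
	int err = 0;

	if (!test_and_set_bit(bit, state)) {
		err = try(ctx);
		if (err)
			clear_bit(bit, state);	/* allow a retry */
	}
	return err;
}
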
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_pf *pf = vsi->back;
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;
	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

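/* Editor's illustration (not part of the driver): a caller-side sketch
 * of the wrappers above - build a default seed and LUT, then let
 * i40e_config_rss() dispatch to the AQ or register path based on
 * I40E_HW_RSS_AQ_CAPABLE. Assumes vsi->rss_table_size and
 * vsi->rss_size are already set; error logging is omitted and the
 * function name is hypothetical.
 */
static int example_program_default_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;
	i40e_fill_rss_lut(vsi->back, lut, vsi->rss_table_size,
			  vsi->rss_size);
	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
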
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

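/* Editor's illustration (not part of the driver): the modulo fill
 * above in action. With rss_size = 4 the table reads 0,1,2,3,0,1,...
 * so hash buckets are spread evenly over the enabled queues. A sketch
 * only; the 16-entry table is arbitrary and the helper is
 * hypothetical.
 */
static u8 example_fill_small_lut(void)
{
	u8 lut[16];
	u16 i;

	for (i = 0; i < 16; i++)
		lut[i] = i % 4;		/* queue index for bucket i */
	return lut[15];			/* 15 % 4 == 3 */
}
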
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;

		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

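/* Editor's illustration (not part of the driver): the HENA mask above
 * is a 64-bit value carried by two 32-bit registers; this is the
 * split pattern in isolation. A sketch only; the helper is
 * hypothetical.
 */
static void example_split_hena(u64 hena, u32 *lo, u32 *hi)
{
	*lo = (u32)hena;		/* bits 31:0  -> I40E_PFQF_HENA(0) */
	*hi = (u32)(hena >> 32);	/* bits 63:32 -> I40E_PFQF_HENA(1) */
}
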
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}

/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
				    I40E_HW_128_QP_RSS_CAPABLE |
				    I40E_HW_ATR_EVICT_CAPABLE |
				    I40E_HW_WB_ON_ITR_CAPABLE |
				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
				    I40E_HW_NO_PCI_LINK_CHECK |
				    I40E_HW_USE_SET_LLDP_MIB |
				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
				    I40E_HW_PTP_L4_CAPABLE |
				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);

#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			dev_warn(&pf->pdev->dev,
				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
		}
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	     (pf->hw.aq.fw_maj_ver < 4))) {
		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	     (pf->hw.aq.fw_maj_ver < 4)))
		pf->hw_features |= I40E_HW_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	     (pf->hw.aq.fw_maj_ver >= 5)))
		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;

	/* Enable PTP L4 if FW > v6.0 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.aq.fw_maj_ver >= 6)
		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}

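/* Editor's illustration (not part of the driver): the firmware gate
 * used above for PTP L4, reduced to a predicate. "FW > 6.0" on XL710
 * is implemented as a major-version check, since any 6.x firmware
 * qualifies. A sketch only; the helper name is hypothetical.
 */
static bool example_fw_has_ptp_l4(struct i40e_hw *hw)
{
	return hw->mac.type == I40E_MAC_XL710 &&
	       hw->aq.fw_maj_ver >= 6;
}
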
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector */
		if (pf->num_fdsb_msix > 0)
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_SB_AUTO_DISABLED);
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}

/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);

	return 0;
}

/**
 * i40e_get_udp_port_idx - Look up the index of an offloaded Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port == port)
			return i;
	}

	return i;
}

/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n", port);
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    port);
		return;
	}

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].port = port;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}

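/* Editor's illustration (not part of the driver): the deferred-sync
 * pattern above in isolation. The port table is updated immediately,
 * the slot is flagged in pending_udp_bitmap, and a service-task flag
 * asks the driver to push the change to firmware later. A port of 0
 * marks the slot for deletion. A sketch only; the helper name is
 * hypothetical.
 */
static void example_mark_udp_port_pending(struct i40e_pf *pf, u8 idx,
					  u16 port)
{
	pf->udp_ports[idx].port = port;		/* 0 means "delete" */
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
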
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].port = 0;
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    port);
}

static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID (not supported yet, must be 0)
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
				      true);
			break;
		}
	}

	return 0;
}

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}

/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

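/* Editor's illustration (not part of the driver): the "len & ~(63 * 2)"
 * style checks above reject a header length with a single AND. The
 * mask 63 * 2 = 0x7e covers every valid even length up to 126 bytes,
 * so any surviving bit means the length is odd (bit 0) or too large
 * (bit 7 and up). A sketch only; the helper name is hypothetical.
 */
static bool example_maclen_ok(size_t len)
{
	return (len & ~(size_t)(63 * 2)) == 0;
}
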
/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: the VSI being changed
 * @prog: XDP program
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
			  struct bpf_prog *prog)
{
	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (frame_size > vsi->rx_buf_len)
		return -EINVAL;

	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
		return 0;

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf, true);

	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset)
		i40e_reset_and_rebuild(pf, true, true);

	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

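/* Editor's illustration (not part of the driver): the lock-free
 * program swap used above. xchg() atomically publishes the new XDP
 * program and hands back the old one, whose reference is dropped once
 * it is no longer published. A sketch only; the reset dance and
 * per-ring propagation are omitted, and the helper name is
 * hypothetical.
 */
static void example_swap_xdp_prog(struct i40e_vsi *vsi,
				  struct bpf_prog *prog)
{
	struct bpf_prog *old = xchg(&vsi->xdp_prog, prog);

	if (old)
		bpf_prog_put(old);	/* release the displaced program */
}
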
/**
 * i40e_xdp - implements ndo_xdp for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_xdp *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_xdp		= i40e_xdp,
};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE;
	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
		 */
		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			ctxt.info.valid_sections |=
				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
		}

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
41c445ff
JB
10076 ret = -ENOENT;
10077 goto err;
10078 }
1a2f6248 10079 vsi->info = ctxt.info;
41c445ff
JB
10080 vsi->info.valid_sections = 0;
10081 vsi->seid = ctxt.seid;
10082 vsi->id = ctxt.vsi_number;
10083 }
10084
c3c7ea27 10085 vsi->active_filters = 0;
0da36b97 10086 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
278e7d0b 10087 spin_lock_bh(&vsi->mac_filter_hash_lock);
41c445ff 10088 /* If macvlan filters already exist, force them to get loaded */
278e7d0b 10089 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
c3c7ea27 10090 f->state = I40E_FILTER_NEW;
41c445ff 10091 f_count++;
21659035 10092 }
278e7d0b 10093 spin_unlock_bh(&vsi->mac_filter_hash_lock);
30650cc5 10094
41c445ff
JB
10095 if (f_count) {
10096 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
10097 pf->flags |= I40E_FLAG_FILTER_SYNC;
10098 }
10099
10100 /* Update VSI BW information */
10101 ret = i40e_vsi_get_bw_info(vsi);
10102 if (ret) {
10103 dev_info(&pf->pdev->dev,
f1c7e72e
SN
10104 "couldn't get vsi bw info, err %s aq_err %s\n",
10105 i40e_stat_str(&pf->hw, ret),
10106 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
41c445ff
JB
10107 /* VSI is already added so not tearing that up */
10108 ret = 0;
10109 }
10110
10111err:
10112 return ret;
10113}
10114
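/* Note on the filter reload in i40e_add_vsi() above: marking every filter
 * I40E_FILTER_NEW and raising I40E_FLAG_FILTER_SYNC does not touch the
 * hardware directly; it asks the service task (or any direct caller of
 * i40e_sync_vsi_filters()) to replay the filters into the freshly added
 * VSI.  Sketch of the hand-off, assuming a VSI that already holds filters:
 *
 *	ret = i40e_add_vsi(vsi);		// marks filters, sets the flag
 *	if (!ret)
 *		i40e_sync_vsi_filters(vsi);	// or let the service task run
 */
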
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}

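/* The release path above intentionally counts both the VSIs and the VEBs
 * still hanging off the same uplink: the VEB is only torn down when nothing
 * but its controlling VSI remains (n == 0), and floating VEBs
 * (uplink_seid == 0) are always left for an explicit remove request from
 * further up the stack.
 */
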
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

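/* Vector accounting used above: with MSI-X, each VSI reserves
 * num_q_vectors entries from pf->irq_pile and records the first one in
 * base_vector; with legacy/MSI interrupts nothing is reserved here because
 * queue handling piggybacks on the single misc/ICR0 vector.
 */
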
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

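/* Queue-pair math shared with i40e_vsi_setup() below: when an XDP program
 * is attached, each queue pair also needs an XDP Tx ring, so the lump
 * reservation doubles.  For example, a VSI with alloc_queue_pairs == 8
 * reserves 16 entries from pf->qp_pile while XDP is enabled.
 */
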
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}

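/* Usage sketch for i40e_vsi_setup(), modelled loosely on the Flow Director
 * and SR-IOV call sites elsewhere in the driver (illustrative only; the
 * uplink_seid and vf_id identifiers stand in for real values):
 *
 *	// FDIR sideband VSI hung off the main LAN VSI
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *
 *	// VF VSI, where param1 carries the VF id
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, uplink_seid, vf_id);
 *
 * A NULL return means the allocation or switch programming failed.
 */
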
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}

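/* Decode used above: the two little-endian words of tc_bw_max pack one
 * 4-bit field per TC, of which the low 3 bits are kept (hence the 0x7
 * mask).  Worked example: with tc_bw_max == 0x321, TC0 decodes to
 * (0x321 >> 0) & 0x7 == 1, TC1 to (0x321 >> 4) & 0x7 == 2, and TC2 to
 * (0x321 >> 8) & 0x7 == 3.
 */
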
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

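/* The recursion above is bounded in practice: each level consumes one of
 * the at most I40E_MAX_VEB relays and the switch elements form a tree, so
 * a VEB is never its own transitive uplink and the walk terminates even on
 * a fully populated switch.
 */
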
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}

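/* Illustrative calls mirroring the doc comment above: the common case links
 * a new VEB under the MAC uplink for an existing VSI, while passing zero
 * for both seids creates a floating VEB:
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *	veb = i40e_veb_setup(pf, 0, 0, 0, enabled_tc);	// floating relay
 */
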
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}

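/* The fetch loop above pages through the switch configuration: each AQ
 * command fills at most an I40E_AQ_LARGE_BUF sized buffer and returns, in
 * next_seid, the element to continue from; a returned next_seid of zero
 * marks the final page.
 */
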
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */

	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}

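/* Worked example of the LAN queue clamp above (values invented for
 * illustration): with 16 online CPUs, rss_size_max == 64, num_tx_qp == 128
 * and num_msix_vectors == 48, q_max starts at max(64, 16) == 64, stays at
 * min(64, 128) == 64, then drops to min(64, 48) == 48, so num_lan_qps
 * ends up as 48.
 */
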
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

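/* eth_platform_get_mac_address() returns zero only when a platform-provided
 * address was found and copied, so the fallback above reads the MAC out of
 * NVM whenever no Open Firmware or arch-specific override exists.
 */
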
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

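	/* A 64-bit DMA mask is preferred since all rings and descriptors are
	 * DMA-mapped; the 32-bit retry above keeps the device usable on
	 * hosts or behind IOMMUs that cannot address the full 64-bit range.
	 */
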
11351 /* set up pci connections */
56d766d6 11352 err = pci_request_mem_regions(pdev, i40e_driver_name);
41c445ff
JB
11353 if (err) {
11354 dev_info(&pdev->dev,
11355 "pci_request_selected_regions failed %d\n", err);
11356 goto err_pci_reg;
11357 }
11358
11359 pci_enable_pcie_error_reporting(pdev);
11360 pci_set_master(pdev);
11361
11362 /* Now that we have a PCI connection, we need to do the
11363 * low level device setup. This is primarily setting up
11364 * the Admin Queue structures and then querying for the
11365 * device's current profile information.
11366 */
11367 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
11368 if (!pf) {
11369 err = -ENOMEM;
11370 goto err_pf_alloc;
11371 }
11372 pf->next_vsi = 0;
11373 pf->pdev = pdev;
9e6c9c0f 11374 set_bit(__I40E_DOWN, pf->state);
41c445ff
JB
11375
11376 hw = &pf->hw;
11377 hw->back = pf;
232f4706 11378
2ac8b675
SN
11379 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
11380 I40E_MAX_CSR_SPACE);
232f4706 11381
2ac8b675 11382 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
41c445ff
JB
11383 if (!hw->hw_addr) {
11384 err = -EIO;
11385 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
11386 (unsigned int)pci_resource_start(pdev, 0),
2ac8b675 11387 pf->ioremap_len, err);
41c445ff
JB
11388 goto err_ioremap;
11389 }
11390 hw->vendor_id = pdev->vendor;
11391 hw->device_id = pdev->device;
11392 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
11393 hw->subsystem_vendor_id = pdev->subsystem_vendor;
11394 hw->subsystem_device_id = pdev->subsystem_device;
11395 hw->bus.device = PCI_SLOT(pdev->devfn);
11396 hw->bus.func = PCI_FUNC(pdev->devfn);
b3f028fc 11397 hw->bus.bus_id = pdev->bus->number;
93cd765b 11398 pf->instance = pfs_found;
41c445ff 11399
ab243ec9
SP
11400 /* Select something other than the 802.1ad ethertype for the
11401 * switch to use internally and drop on ingress.
11402 */
11403 hw->switch_tag = 0xffff;
11404 hw->first_tag = ETH_P_8021AD;
11405 hw->second_tag = ETH_P_8021Q;
11406
0e588de1
JK
11407 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
11408 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
11409
de03d2b0
SN
11410 /* set up the locks for the AQ, do this only once in probe
11411 * and destroy them only once in remove
11412 */
11413 mutex_init(&hw->aq.asq_mutex);
11414 mutex_init(&hw->aq.arq_mutex);
11415
5d4ca23e
AD
11416 pf->msg_enable = netif_msg_init(debug,
11417 NETIF_MSG_DRV |
11418 NETIF_MSG_PROBE |
11419 NETIF_MSG_LINK);
11420 if (debug < -1)
11421 pf->hw.debug_mask = debug;
5b5faa43 11422
7134f9ce
JB
11423 /* do a special CORER for clearing PXE mode once at init */
11424 if (hw->revision_id == 0 &&
11425 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
11426 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
11427 i40e_flush(hw);
11428 msleep(200);
11429 pf->corer_count++;
11430
11431 i40e_clear_pxe_mode(hw);
11432 }
11433
41c445ff 11434 /* Reset here to make sure all is clean and to define PF 'n' */
838d41d9 11435 i40e_clear_hw(hw);
41c445ff
JB
11436 err = i40e_pf_reset(hw);
11437 if (err) {
11438 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
11439 goto err_pf_reset;
11440 }
11441 pf->pfr_count++;
11442
11443 hw->aq.num_arq_entries = I40E_AQ_LEN;
11444 hw->aq.num_asq_entries = I40E_AQ_LEN;
11445 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11446 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11447 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
b2008cbf 11448
b294ac70 11449 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
b2008cbf
CW
11450 "%s-%s:misc",
11451 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
41c445ff
JB
11452
11453 err = i40e_init_shared_code(hw);
11454 if (err) {
b2a75c58
ASJ
11455 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
11456 err);
41c445ff
JB
11457 goto err_pf_reset;
11458 }
11459
d52c20b7
JB
11460 /* set up a default setting for link flow control */
11461 pf->hw.fc.requested_mode = I40E_FC_NONE;
11462
41c445ff 11463 err = i40e_init_adminq(hw);
2b2426a7
CW
11464 if (err) {
11465 if (err == I40E_ERR_FIRMWARE_API_VERSION)
11466 dev_info(&pdev->dev,
11467 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
11468 else
11469 dev_info(&pdev->dev,
11470 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
11471
11472 goto err_pf_reset;
11473 }
5bbb2e20 11474 i40e_get_oem_version(hw);
f0b44440 11475
6dec1017
SN
11476 /* provide nvm, fw, api versions */
11477 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
11478 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
11479 hw->aq.api_maj_ver, hw->aq.api_min_ver,
11480 i40e_nvm_version_str(hw));
f0b44440 11481
7aa67613 11482 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
22b96551 11483 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
278b6f62 11484 dev_info(&pdev->dev,
7aa67613 11485 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
e04ea002 11486 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
278b6f62 11487 dev_info(&pdev->dev,
7aa67613 11488 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
278b6f62 11489
4eb3f768
SN
11490 i40e_verify_eeprom(pf);
11491
2c5fe33b
JB
11492 /* Rev 0 hardware was never productized */
11493 if (hw->revision_id < 1)
11494 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
11495
6ff4ef86 11496 i40e_clear_pxe_mode(hw);
41c445ff
JB
11497 err = i40e_get_capabilities(pf);
11498 if (err)
11499 goto err_adminq_setup;
11500
11501 err = i40e_sw_init(pf);
11502 if (err) {
11503 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
11504 goto err_sw_init;
11505 }
11506
11507 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
c76cb6ed 11508 hw->func_caps.num_rx_qp, 0, 0);
41c445ff
JB
11509 if (err) {
11510 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
11511 goto err_init_lan_hmc;
11512 }
11513
11514 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
11515 if (err) {
11516 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
11517 err = -ENOENT;
11518 goto err_configure_lan_hmc;
11519 }
11520
b686ece5
NP
11521 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
11522 * Ignore error return codes because if it was already disabled via
11523 * hardware settings this will fail
11524 */
d36e41dc 11525 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
b686ece5
NP
11526 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
11527 i40e_aq_stop_lldp(hw, true, NULL);
11528 }
11529
b499ffb0
SV
11530 /* allow a platform config to override the HW addr */
11531 i40e_get_platform_mac_addr(pdev, pf);
41c4c2b5 11532
f62b5060 11533 if (!is_valid_ether_addr(hw->mac.addr)) {
41c445ff
JB
11534 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
11535 err = -EIO;
11536 goto err_mac_addr;
11537 }
11538 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9a173901 11539 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
1f224ad2
NP
11540 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
11541 if (is_valid_ether_addr(hw->mac.port_addr))
d36e41dc 11542 pf->hw_features |= I40E_HW_PORT_ID_VALID;
41c445ff
JB
11543
11544 pci_set_drvdata(pdev, pf);
11545 pci_save_state(pdev);
4e3b35b0
NP
11546#ifdef CONFIG_I40E_DCB
11547 err = i40e_init_pf_dcb(pf);
11548 if (err) {
aebfc816 11549 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
c17ef430 11550 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
014269ff 11551 /* Continue without DCB enabled */
4e3b35b0
NP
11552 }
11553#endif /* CONFIG_I40E_DCB */
41c445ff
JB
11554
11555 /* set up periodic task facility */
11556 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
11557 pf->service_timer_period = HZ;
11558
11559 INIT_WORK(&pf->service_task, i40e_service_task);
0da36b97 11560 clear_bit(__I40E_SERVICE_SCHED, pf->state);
41c445ff 11561
1d5109d1
SN
11562 /* NVM bit on means WoL disabled for the port */
11563 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
75f5cea9 11564 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
1d5109d1
SN
11565 pf->wol_en = false;
11566 else
11567 pf->wol_en = true;
8e2773ae
SN
11568 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11569
41c445ff
JB
11570 /* set up the main switch operations */
11571 i40e_determine_queue_usage(pf);
c1147280
JB
11572 err = i40e_init_interrupt_scheme(pf);
11573 if (err)
11574 goto err_switch_setup;
41c445ff 11575
505682cd
MW
11576 /* The number of VSIs reported by the FW is the minimum guaranteed
11577 * to us; HW supports far more and we share the remaining pool with
11578 * the other PFs. We allocate space for more than the guarantee with
11579 * the understanding that we might not get them all later.
41c445ff 11580 */
505682cd
MW
11581 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11582 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11583 else
11584 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11585
11586 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
d17038d6
JB
11587 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11588 GFP_KERNEL);
ed87ac09
WY
11589 if (!pf->vsi) {
11590 err = -ENOMEM;
41c445ff 11591 goto err_switch_setup;
ed87ac09 11592 }
41c445ff 11593
fa11cb3d
ASJ
11594#ifdef CONFIG_PCI_IOV
11595 /* prep for VF support */
11596 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11597 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
0da36b97 11598 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
fa11cb3d
ASJ
11599 if (pci_num_vf(pdev))
11600 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11601 }
11602#endif
bc7d338f 11603 err = i40e_setup_pf_switch(pf, false);
41c445ff
JB
11604 if (err) {
11605 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11606 goto err_vsis;
11607 }
58fc3267
HZ
11608
11609 /* Make sure flow control is set according to current settings */
11610 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

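	/* some parts need autonegotiation restarted once at probe time; the
	 * short sleep gives the firmware a moment to settle first
	 */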
	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSI-X we are going to set up the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing are combined in
	 * the same vector, which gets set up at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

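	/* reserve a block of MSI-X vectors from the IRQ pile for the iWARP
	 * (RDMA) client; if none are available, iWARP support is turned off
	 */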
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

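	/* register this PF with the driver's debugfs support */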
	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all flow control frames from any VSI so they
	 * are never transmitted. By doing so we stop a malicious VF from
	 * sending out PAUSE or PFC frames and potentially controlling
	 * traffic for other PF/VF VSIs.
	 * The FW can still send flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

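	/* on these 10GBASE-T parts the LEDs are driven through the PHY, and
	 * the X722 SFP variant carries a CRT retimer; record both quirks
	 */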
	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shut down the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shut down and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shut down the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

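		/* a zero reset-trigger register indicates no reset is still
		 * pending, so the device is treated as recovered
		 */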
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the MAC address write command to be called first
	 * with one of these flags before it is called again with the
	 * multicast enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
		| I40E_AQC_WOL_PRESERVE_ON_PFR
		| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

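	/* program the wake-up registers to match wol_en: APM arms the port
	 * for wake and WUFC enables the magic-packet filter
	 */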
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, false);

	/* Clear suspended state last, after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

#endif /* CONFIG_PM */

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

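/* SIMPLE_DEV_PM_OPS() wires i40e_suspend/i40e_resume into all of the
 * system-sleep callbacks (suspend/resume, freeze/thaw, poweroff/restore)
 */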
static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm = &i40e_pm_ops,
	},
#endif /* CONFIG_PM */
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. It creates the service workqueue, initializes debugfs, and
 * registers the driver with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);