// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
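
/* With the values above, __stringify() turns each macro expansion into a
 * string literal, so DRV_VERSION evaluates to "2.8.10-k"; the "-k"
 * (DRV_KERN) suffix conventionally marks the in-kernel build of the
 * driver.
 */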

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
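/* Note on the parameter above: a small non-negative value selects a netif
 * message level, while the 0x8XXXXXXX mask form (negative as a signed int)
 * is treated as a raw hardware debug mask; -1 keeps the driver default.
 */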

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

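/* For illustration: each pile->list entry is a u16 holding an owner id,
 * with I40E_PILE_VALID_BIT set once the entry is allocated. A request of
 * needed = 4 with search_hint = 0 against a 12-entry pile
 *
 *   index: 0 1 2 3 4 5 6 7 8 9 10 11
 *          A A . . . A . . . .  .  .     (A = allocated, . = free)
 *
 * skips the allocated pair, rejects the 3-entry run starting at index 2,
 * and returns base index 6 after tagging entries 6..9 with the caller's
 * id; search_hint then points at index 10 for the next request.
 */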
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

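/* Back-to-back timeouts escalate through progressively wider resets:
 * level 1 requests a PF reset, level 2 a CORE reset, and level 3 a
 * GLOBAL reset; once the device stays healthy for 20 seconds the level
 * drops back to 1 on the next timeout.
 */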
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}

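/* The begin/retry pair above is the standard u64_stats seqcount pattern:
 * on 32-bit hosts a 64-bit counter update is not atomic, so the read is
 * retried if the writer touched the counters mid-fetch; on 64-bit builds
 * it compiles down to plain loads.
 */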
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring++;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring++;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

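/* Roll-over example: with a saved offset of 0xFFFFFFFFFF00, a 48-bit
 * counter that wraps around to 0x100 reads as "less than the offset",
 * so the code adds 2^48 before subtracting:
 * (0x100 + BIT_ULL(48)) - 0xFFFFFFFFFF00 = 0x200 events since load.
 */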
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}

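/* Worked example of the rules above: a VSI carrying filters
 * {MAC A, vlan -1} and {MAC B, vlan -1} that gains its first VLAN filter
 * (vlan_filters becomes nonzero) has both rewritten to vlan 0 so they
 * only match untagged frames; when the last VLAN filter is removed they
 * revert to vlan -1 (I40E_VLAN_ANY) and match all traffic again. With a
 * PVID set, every filter is instead forced onto the PVID.
 */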
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

a9ce82f7
AN
1578/**
1579 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1580 * @vsi: vsi structure
1581 * @seed: RSS hash seed
1582 **/
1583static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1584 u8 *lut, u16 lut_size)
1585{
1586 struct i40e_pf *pf = vsi->back;
1587 struct i40e_hw *hw = &pf->hw;
1588 int ret = 0;
1589
1590 if (seed) {
1591 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1592 (struct i40e_aqc_get_set_rss_key_data *)seed;
1593 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1594 if (ret) {
1595 dev_info(&pf->pdev->dev,
1596 "Cannot set RSS key, err %s aq_err %s\n",
1597 i40e_stat_str(hw, ret),
1598 i40e_aq_str(hw, hw->aq.asq_last_status));
1599 return ret;
1600 }
1601 }
1602 if (lut) {
1603 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1604
1605 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1606 if (ret) {
1607 dev_info(&pf->pdev->dev,
1608 "Cannot set RSS lut, err %s aq_err %s\n",
1609 i40e_stat_str(hw, ret),
1610 i40e_aq_str(hw, hw->aq.asq_last_status));
1611 return ret;
1612 }
1613 }
1614 return ret;
1615}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
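
/* Illustrative example (assumed sizes, not driver code): the default LUT
 * spreads queues round-robin across the table, so with rss_table_size = 8
 * and rss_size = 3 the filled table would look like
 *
 *	lut[] = { 0, 1, 2, 0, 1, 2, 0, 1 };
 *
 * i.e. incoming hash buckets are distributed evenly over the three active
 * queues.
 */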

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @enabled_tc: bitmap of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
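
/* Worked example (assumed values): for num_qps = 6 queue pairs on TC0,
 * ilog2(6) = 2 and 6 is not a power of two, so pow becomes 3 and the qmap
 * above advertises 2^3 = 8 queue slots starting at offset 0:
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */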

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow more TC queue pairs than there are MSI-X vectors */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
						   I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				/* fall through */
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
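
/* Illustrative sketch (assumed values): each 16-bit tc_mapping entry packs
 * a TC's first queue and its power-of-two queue count. A TC at offset 8
 * with 4 queues (pow = 2) would encode as
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */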

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode, which guarantees the hash lock is
 * held.
 */
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode, which guarantees the hash lock is
 * held.
 */
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}

/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 */
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
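
/* Illustrative sketch: the sender primes each element with a sentinel
 * before the AQ call, so per-filter acceptance can be detected afterwards
 * without a separate status array (see i40e_aqc_add_filters() below):
 *
 *	add_list[i].match_method = I40E_AQC_MM_ERR_NO_RES;
 *	i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL);
 *
 * Firmware overwrites match_method on each filter it accepts, so any
 * element still carrying the sentinel failed.
 */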

/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force *retval
 * to be set to 0. This ensures that a sequence of calls to this function
 * preserve the previous value of *retval on successful delete.
 */
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
 * space for more filters.
 */
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}

/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s, forcing overflow promiscuous on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);
	}

	return aq_ret;
}

/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in. This identifies and sets it appropriately.
 * Returns 0 on success.
 **/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
							     vsi->seid,
							     promisc, NULL,
							     true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
							       vsi->seid,
							       promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}
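
/* Summary of the branches above (illustrative):
 *
 *	main VSI behind a VEB, non-MFP  ->  (clear_)default VSI "defport"
 *	everything else                 ->  unicast + multicast promiscuous
 *
 * Defport delivers all unicast/multicast/VLAN traffic without also
 * replicating VF or VMDq traffic onto the main VSI.
 */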

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	bool old_overflow, new_overflow;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, allocated later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and releasing the filter entry,
			 * instead of deleting a MAC filter.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (old_overflow && !failed_filters &&
	    vsi->active_filters < vsi->promisc_threshold) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted, do not enter promiscuous mode */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	/* If we are entering overflow promiscuous, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (!old_overflow && new_overflow)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       new_overflow);
		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
				 cur_promisc ? "on" : "off",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
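
/* Batching arithmetic (illustrative, with assumed sizes): filter_list_len
 * above is derived from the AdminQ send-buffer size. For a hypothetical
 * 4096-byte ASQ buffer and 16-byte remove_macvlan elements, deletions
 * would be flushed to firmware in batches of up to 4096 / 16 = 256
 * filters per i40e_aqc_del_filters() call.
 */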

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf)
		return;
	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
		return;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				set_bit(__I40E_MACVLAN_SYNC_PENDING,
					pf->state);
				break;
			}
		}
	}
}

/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
	return 0;
}
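
/* Worked example: for new_mtu = 1500 on an XDP-enabled VSI, the check
 * above computes
 *
 *	frame_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4)
 *	           = 1522 bytes,
 *
 * which fits even the 2048-byte XDP buffer, so the MTU change is allowed.
 */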

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 */
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}

/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 **/
static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
				    __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (vid >= VLAN_N_VID)
		return;
	set_bit(vid, vsi->active_vlans);
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
					vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}
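
/* Usage sketch (assumed index): for a queue vector with v_idx = 3 and no
 * NUMA preference (node -1), cpumask_local_spread(3, -1) simply returns
 * the fourth online CPU, and that single-CPU mask becomes the initial XPS
 * affinity for this Tx queue.
 */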

/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	if (ring_is_xdp(ring))
		ring->xsk_umem = i40e_xsk_umem(ring);

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */

	if (ring->ch)
		tx_ctx.rdylist =
			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
	else
		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);

	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (ring->ch) {
		if (ring->ch->type == I40E_VSI_VMDQ2)
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		else
			return -EINVAL;

		qtx_ctl |= (ring->ch->vsi_number <<
			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			    I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		if (vsi->type == I40E_VSI_VMDQ2) {
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				    I40E_QTX_CTL_VFVM_INDX_MASK;
		} else {
			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
		}
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
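
/* Illustrative encoding (assumed IDs): for a plain PF-owned queue on
 * pf_id 1, the association register programmed above reduces to
 *
 *	qtx_ctl = I40E_QTX_CTL_PF_QUEUE |
 *		  ((1 << I40E_QTX_CTL_PF_INDX_SHIFT) &
 *		   I40E_QTX_CTL_PF_INDX_MASK);
 *	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
 */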
3178
3179/**
3180 * i40e_configure_rx_ring - Configure a receive ring context
3181 * @ring: The Rx ring to configure
3182 *
3183 * Configure the Rx descriptor ring in the HMC context.
3184 **/
3185static int i40e_configure_rx_ring(struct i40e_ring *ring)
3186{
3187 struct i40e_vsi *vsi = ring->vsi;
3188 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3189 u16 pf_q = vsi->base_queue + ring->queue_index;
3190 struct i40e_hw *hw = &vsi->back->hw;
3191 struct i40e_hmc_obj_rxq rx_ctx;
3192 i40e_status err = 0;
0a714186
BT
3193 bool ok;
3194 int ret;
41c445ff 3195
bd6cd4e6 3196 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
41c445ff
JB
3197
3198 /* clear the context structure first */
3199 memset(&rx_ctx, 0, sizeof(rx_ctx));
3200
0a714186
BT
3201 if (ring->vsi->type == I40E_VSI_MAIN)
3202 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3203
3204 ring->xsk_umem = i40e_xsk_umem(ring);
3205 if (ring->xsk_umem) {
3206 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3207 XDP_PACKET_HEADROOM;
3208 /* For AF_XDP ZC, we disallow packets to span on
3209 * multiple buffers, thus letting us skip that
3210 * handling in the fast-path.
3211 */
3212 chain_len = 1;
3213 ring->zca.free = i40e_zca_free;
3214 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3215 MEM_TYPE_ZERO_COPY,
3216 &ring->zca);
3217 if (ret)
3218 return ret;
3219 dev_info(&vsi->back->pdev->dev,
3220 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3221 ring->queue_index);
3222
3223 } else {
3224 ring->rx_buf_len = vsi->rx_buf_len;
3225 if (ring->vsi->type == I40E_VSI_MAIN) {
3226 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3227 MEM_TYPE_PAGE_SHARED,
3228 NULL);
3229 if (ret)
3230 return ret;
3231 }
3232 }
41c445ff 3233
dab86afd
AD
3234 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3235 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
41c445ff
JB
3236
3237 rx_ctx.base = (ring->dma / 128);
3238 rx_ctx.qlen = ring->count;
3239
bec60fc4
JB
3240 /* use 32 byte descriptors */
3241 rx_ctx.dsize = 1;
41c445ff 3242
bec60fc4
JB
3243 /* descriptor type is always zero
3244 * rx_ctx.dtype = 0;
3245 */
b32bfa17 3246 rx_ctx.hsplit_0 = 0;
41c445ff 3247
b32bfa17 3248 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
7134f9ce
JB
3249 if (hw->revision_id == 0)
3250 rx_ctx.lrxqthresh = 0;
3251 else
7362be9e 3252 rx_ctx.lrxqthresh = 1;
3253 rx_ctx.crcstrip = 1;
3254 rx_ctx.l2tsel = 1;
3255 /* this controls whether VLAN is stripped from inner headers */
3256 rx_ctx.showiv = 0;
3257 /* set the prefena field to 1 because the manual says to */
3258 rx_ctx.prefena = 1;
3259
3260 /* clear the context in the HMC */
3261 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3262 if (err) {
3263 dev_info(&vsi->back->pdev->dev,
3264 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3265 ring->queue_index, pf_q, err);
3266 return -ENOMEM;
3267 }
3268
3269 /* set the context in the HMC */
3270 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3271 if (err) {
3272 dev_info(&vsi->back->pdev->dev,
3273 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3274 ring->queue_index, pf_q, err);
3275 return -ENOMEM;
3276 }
3277
3278 /* configure Rx buffer alignment */
3279 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3280 clear_ring_build_skb_enabled(ring);
3281 else
3282 set_ring_build_skb_enabled(ring);
3283
3284 /* cache tail for quicker writes, and clear the reg before use */
3285 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3286 writel(0, ring->tail);
3287
3288 ok = ring->xsk_umem ?
3289 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3290 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3291 if (!ok) {
3292 dev_info(&vsi->back->pdev->dev,
3293 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
3294 ring->xsk_umem ? "UMEM enabled " : "",
3295 ring->queue_index, pf_q);
3296 }
3297
3298 return 0;
3299}
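/* Worked example (illustrative): the Rx buffer size is programmed in
 * 128-byte units, assuming I40E_RXQ_CTX_DBUFF_SHIFT is 7 as in the driver
 * headers. With rx_buf_len == 2048:
 *
 *	rx_ctx.dbuff = DIV_ROUND_UP(2048, BIT_ULL(7)) = 16
 *
 * and in the AF_XDP zero-copy case chain_len == 1, so rxmax is capped at a
 * single buffer: min_t(u16, vsi->max_frame, 1 * ring->rx_buf_len).
 */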
3300
3301/**
3302 * i40e_vsi_configure_tx - Configure the VSI for Tx
3303 * @vsi: VSI structure describing this set of rings and resources
3304 *
3305 * Configure the Tx VSI for operation.
3306 **/
3307static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3308{
3309 int err = 0;
3310 u16 i;
3311
3312 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3313 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
41c445ff 3314
3315 if (!i40e_enabled_xdp_vsi(vsi))
3316 return err;
3317
3318 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3319 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3320
3321 return err;
3322}
3323
3324/**
3325 * i40e_vsi_configure_rx - Configure the VSI for Rx
3326 * @vsi: the VSI being configured
3327 *
3328 * Configure the Rx VSI for operation.
3329 **/
3330static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3331{
3332 int err = 0;
3333 u16 i;
3334
3335 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3336 vsi->max_frame = I40E_MAX_RXBUFFER;
3337 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3338#if (PAGE_SIZE < 8192)
3339 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3340 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3341 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3342 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3343#endif
3344 } else {
3345 vsi->max_frame = I40E_MAX_RXBUFFER;
3346 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3347 I40E_RXBUFFER_2048;
dab86afd 3348 }
3349
3350 /* set up individual rings */
3351 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 3352 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3353
3354 return err;
3355}
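/* Worked example (illustrative, assuming I40E_2K_TOO_SMALL_WITH_PADDING
 * evaluates false for the build): on a 4K-page system with an MTU of 1500
 * (<= ETH_DATA_LEN) and build_skb Rx, the middle branch applies and both
 * max_frame and rx_buf_len become I40E_RXBUFFER_1536 - NET_IP_ALIGN, i.e.
 * one padded buffer per frame. Legacy-Rx and netdev-less VSIs instead take
 * 2K buffers with descriptor chaining up to I40E_MAX_RXBUFFER.
 */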
3356
3357/**
3358 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3359 * @vsi: ptr to the VSI
3360 **/
3361static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3362{
e7046ee1 3363 struct i40e_ring *tx_ring, *rx_ring;
3364 u16 qoffset, qcount;
3365 int i, n;
3366
3367 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3368 /* Reset the TC information */
3369 for (i = 0; i < vsi->num_queue_pairs; i++) {
3370 rx_ring = vsi->rx_rings[i];
3371 tx_ring = vsi->tx_rings[i];
3372 rx_ring->dcb_tc = 0;
3373 tx_ring->dcb_tc = 0;
3374 }
a9ce82f7 3375 return;
cd238a3e 3376 }
3377
3378 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
41a1d04b 3379 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3380 continue;
3381
3382 qoffset = vsi->tc_config.tc_info[n].qoffset;
3383 qcount = vsi->tc_config.tc_info[n].qcount;
3384 for (i = qoffset; i < (qoffset + qcount); i++) {
3385 rx_ring = vsi->rx_rings[i];
3386 tx_ring = vsi->tx_rings[i];
3387 rx_ring->dcb_tc = n;
3388 tx_ring->dcb_tc = n;
3389 }
3390 }
3391}
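/* Worked example (illustrative): with two TCs enabled and
 * tc_info[0] = { qoffset 0, qcount 4 }, tc_info[1] = { qoffset 4,
 * qcount 4 }, rings 0-3 end up with dcb_tc = 0 and rings 4-7 with
 * dcb_tc = 1; with DCB disabled every ring is reset to dcb_tc = 0.
 */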
3392
3393/**
3394 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3395 * @vsi: ptr to the VSI
3396 **/
3397static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3398{
3399 if (vsi->netdev)
3400 i40e_set_rx_mode(vsi->netdev);
3401}
3402
3403/**
3404 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3405 * @vsi: Pointer to the targeted VSI
3406 *
3407 * This function replays the saved hlist of Sideband Flow Director
3408 * filters back onto the hardware.
3409 **/
3410static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3411{
3412 struct i40e_fdir_filter *filter;
3413 struct i40e_pf *pf = vsi->back;
3414 struct hlist_node *node;
3415
3416 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3417 return;
3418
6d069425 3419 /* Reset FDir counters as we're replaying all existing filters */
3420 pf->fd_tcp4_filter_cnt = 0;
3421 pf->fd_udp4_filter_cnt = 0;
f223c875 3422 pf->fd_sctp4_filter_cnt = 0;
097dbf52 3423 pf->fd_ip4_filter_cnt = 0;
6d069425 3424
3425 hlist_for_each_entry_safe(filter, node,
3426 &pf->fdir_filter_list, fdir_node) {
3427 i40e_add_del_fdir(vsi, filter, true);
3428 }
3429}
3430
3431/**
3432 * i40e_vsi_configure - Set up the VSI for action
3433 * @vsi: the VSI being configured
3434 **/
3435static int i40e_vsi_configure(struct i40e_vsi *vsi)
3436{
3437 int err;
3438
3439 i40e_set_vsi_rx_mode(vsi);
3440 i40e_restore_vlan(vsi);
3441 i40e_vsi_config_dcb_rings(vsi);
3442 err = i40e_vsi_configure_tx(vsi);
3443 if (!err)
3444 err = i40e_vsi_configure_rx(vsi);
3445
3446 return err;
3447}
3448
3449/**
3450 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3451 * @vsi: the VSI being configured
3452 **/
3453static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3454{
74608d17 3455 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
41c445ff 3456 struct i40e_pf *pf = vsi->back;
3457 struct i40e_hw *hw = &pf->hw;
3458 u16 vector;
3459 int i, q;
3460 u32 qp;
3461
3462 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3463 * and PFINT_LNKLSTn registers, e.g.:
3464 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3465 */
3466 qp = vsi->base_queue;
3467 vector = vsi->base_vector;
493fb300 3468 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3469 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3470
a0073a4b 3471 q_vector->rx.next_update = jiffies + 1;
3472 q_vector->rx.target_itr =
3473 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
41c445ff 3474 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3475 q_vector->rx.target_itr);
3476 q_vector->rx.current_itr = q_vector->rx.target_itr;
3477
3478 q_vector->tx.next_update = jiffies + 1;
3479 q_vector->tx.target_itr =
3480 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
41c445ff 3481 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3482 q_vector->tx.target_itr);
3483 q_vector->tx.current_itr = q_vector->tx.target_itr;
a0073a4b 3484
ac26fc13 3485 wr32(hw, I40E_PFINT_RATEN(vector - 1),
1c0e6a36 3486 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3487
3488 /* Linked list for the queuepairs assigned to this vector */
3489 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3490 for (q = 0; q < q_vector->num_ringpairs; q++) {
74608d17 3491 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3492 u32 val;
3493
41c445ff 3494 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3495 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3496 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3497 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3498 (I40E_QUEUE_TYPE_TX <<
3499 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3500
3501 wr32(hw, I40E_QINT_RQCTL(qp), val);
3502
3503 if (has_xdp) {
3504 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3505 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3506 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3507 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3508 (I40E_QUEUE_TYPE_TX <<
3509 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3510
3511 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3512 }
3513
41c445ff 3514 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3515 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3516 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3517 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3518 (I40E_QUEUE_TYPE_RX <<
3519 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3520
3521 /* Terminate the linked list */
3522 if (q == (q_vector->num_ringpairs - 1))
3523 val |= (I40E_QUEUE_END_OF_LIST <<
3524 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3525
3526 wr32(hw, I40E_QINT_TQCTL(qp), val);
3527 qp++;
3528 }
3529 }
3530
3531 i40e_flush(hw);
3532}
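/* Illustrative layout (editor's sketch): for a vector owning two queue
 * pairs and no XDP, the cause linked list written above is
 *
 *	PFINT_LNKLSTN[vector-1] -> Rx(qp) -> Tx(qp) ->
 *				   Rx(qp+1) -> Tx(qp+1) -> EOL
 *
 * i.e. each RQCTL points at its paired Tx queue, each TQCTL at the next
 * Rx queue, and the last TQCTL carries I40E_QUEUE_END_OF_LIST.
 */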
3533
3534/**
3535 * i40e_enable_misc_int_causes - enable the non-queue interrupts
f5254429 3536 * @pf: pointer to private device data structure
41c445ff 3537 **/
ab437b5a 3538static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
41c445ff 3539{
ab437b5a 3540 struct i40e_hw *hw = &pf->hw;
3541 u32 val;
3542
3543 /* clear things first */
3544 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3545 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3546
3547 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3548 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3549 I40E_PFINT_ICR0_ENA_GRST_MASK |
3550 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3551 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3552 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3553 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3554 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3555
3556 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3557 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3558
3559 if (pf->flags & I40E_FLAG_PTP)
3560 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3561
3562 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3563
3564 /* SW_ITR_IDX = 0, but don't change INTENA */
3565 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3566 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3567
3568 /* OTHER_ITR_IDX = 0 */
3569 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3570}
3571
3572/**
3573 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3574 * @vsi: the VSI being configured
3575 **/
3576static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3577{
74608d17 3578 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
493fb300 3579 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3580 struct i40e_pf *pf = vsi->back;
3581 struct i40e_hw *hw = &pf->hw;
3582 u32 val;
3583
3584 /* set the ITR configuration */
a0073a4b 3585 q_vector->rx.next_update = jiffies + 1;
556fdfd6 3586 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3587 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3588 q_vector->rx.current_itr = q_vector->rx.target_itr;
a0073a4b 3589 q_vector->tx.next_update = jiffies + 1;
556fdfd6 3590 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3591 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3592 q_vector->tx.current_itr = q_vector->tx.target_itr;
41c445ff 3593
ab437b5a 3594 i40e_enable_misc_int_causes(pf);
3595
3596 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3597 wr32(hw, I40E_PFINT_LNKLST0, 0);
3598
f29eaa3d 3599 /* Associate the queue pair to the vector and enable the queue int */
3600 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3601 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3602 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3603 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3604
3605 wr32(hw, I40E_QINT_RQCTL(0), val);
3606
3607 if (i40e_enabled_xdp_vsi(vsi)) {
3608 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3609 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3610 (I40E_QUEUE_TYPE_TX
3611 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3612
1d67ad39 3613 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3614 }
3615
3616 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3617 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3618 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3619
3620 wr32(hw, I40E_QINT_TQCTL(0), val);
3621 i40e_flush(hw);
3622}
3623
3624/**
3625 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3626 * @pf: board private structure
3627 **/
3628void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3629{
3630 struct i40e_hw *hw = &pf->hw;
3631
3632 wr32(hw, I40E_PFINT_DYN_CTL0,
3633 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3634 i40e_flush(hw);
3635}
3636
3637/**
3638 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3639 * @pf: board private structure
3640 **/
dbadbbe2 3641void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3642{
3643 struct i40e_hw *hw = &pf->hw;
3644 u32 val;
3645
3646 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
dbadbbe2 3647 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3648 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3649
3650 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3651 i40e_flush(hw);
3652}
3653
3654/**
3655 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3656 * @irq: interrupt number
3657 * @data: pointer to a q_vector
3658 **/
3659static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3660{
3661 struct i40e_q_vector *q_vector = data;
3662
cd0b6fa6 3663 if (!q_vector->tx.ring && !q_vector->rx.ring)
3664 return IRQ_HANDLED;
3665
5d3465a1 3666 napi_schedule_irqoff(&q_vector->napi);
3667
3668 return IRQ_HANDLED;
3669}
3670
3671/**
3672 * i40e_irq_affinity_notify - Callback for affinity changes
3673 * @notify: context as to what irq was changed
3674 * @mask: the new affinity mask
3675 *
3676 * This is a callback function used by the irq_set_affinity_notifier function
3677 * so that we may register to receive changes to the irq affinity masks.
3678 **/
3679static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3680 const cpumask_t *mask)
3681{
3682 struct i40e_q_vector *q_vector =
3683 container_of(notify, struct i40e_q_vector, affinity_notify);
3684
7e4d01e7 3685 cpumask_copy(&q_vector->affinity_mask, mask);
3686}
3687
3688/**
3689 * i40e_irq_affinity_release - Callback for affinity notifier release
3690 * @ref: internal core kernel usage
3691 *
3692 * This is a callback function used by the irq_set_affinity_notifier function
3693 * to inform the current notification subscriber that they will no longer
3694 * receive notifications.
3695 **/
3696static void i40e_irq_affinity_release(struct kref *ref) {}
3697
3698/**
3699 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3700 * @vsi: the VSI being configured
3701 * @basename: name for the vector
3702 *
3703 * Allocates MSI-X vectors and requests interrupts from the kernel.
3704 **/
3705static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3706{
3707 int q_vectors = vsi->num_q_vectors;
3708 struct i40e_pf *pf = vsi->back;
3709 int base = vsi->base_vector;
3710 int rx_int_idx = 0;
3711 int tx_int_idx = 0;
3712 int vector, err;
96db776a 3713 int irq_num;
be664cbe 3714 int cpu;
3715
3716 for (vector = 0; vector < q_vectors; vector++) {
493fb300 3717 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
41c445ff 3718
3719 irq_num = pf->msix_entries[base + vector].vector;
3720
cd0b6fa6 3721 if (q_vector->tx.ring && q_vector->rx.ring) {
3722 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3723 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3724 tx_int_idx++;
cd0b6fa6 3725 } else if (q_vector->rx.ring) {
3726 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3727 "%s-%s-%d", basename, "rx", rx_int_idx++);
cd0b6fa6 3728 } else if (q_vector->tx.ring) {
3729 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3730 "%s-%s-%d", basename, "tx", tx_int_idx++);
3731 } else {
3732 /* skip this unused q_vector */
3733 continue;
3734 }
96db776a 3735 err = request_irq(irq_num,
3736 vsi->irq_handler,
3737 0,
3738 q_vector->name,
3739 q_vector);
3740 if (err) {
3741 dev_info(&pf->pdev->dev,
fb43201f 3742 "MSIX request_irq failed, error: %d\n", err);
3743 goto free_queue_irqs;
3744 }
3745
3746 /* register for affinity change notifications */
3747 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3748 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3749 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3750 /* Spread affinity hints out across online CPUs.
3751 *
3752 * get_cpu_mask returns a static constant mask with
3753 * a permanent lifetime so it's ok to pass to
3754 * irq_set_affinity_hint without making a copy.
759dc4a7 3755 */
3756 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3757 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3758 }
3759
63741846 3760 vsi->irqs_ready = true;
3761 return 0;
3762
3763free_queue_irqs:
3764 while (vector) {
3765 vector--;
3766 irq_num = pf->msix_entries[base + vector].vector;
3767 irq_set_affinity_notifier(irq_num, NULL);
3768 irq_set_affinity_hint(irq_num, NULL);
3769 free_irq(irq_num, &vsi->q_vectors[vector]);
3770 }
3771 return err;
3772}
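/* Illustrative example: with a hypothetical basename "i40e-eth0" and
 * combined Tx/Rx vectors, the IRQs requested above appear as
 * "i40e-eth0-TxRx-0", "i40e-eth0-TxRx-1", ... in /proc/interrupts, each
 * with an affinity hint spread across online CPUs by
 * cpumask_local_spread().
 */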
3773
3774/**
3775 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3776 * @vsi: the VSI being un-configured
3777 **/
3778static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3779{
3780 struct i40e_pf *pf = vsi->back;
3781 struct i40e_hw *hw = &pf->hw;
3782 int base = vsi->base_vector;
3783 int i;
3784
2e5c26ea 3785 /* disable interrupt causation from each queue */
41c445ff 3786 for (i = 0; i < vsi->num_queue_pairs; i++) {
3787 u32 val;
3788
3789 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3790 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3791 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3792
3793 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3794 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3795 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3796
3797 if (!i40e_enabled_xdp_vsi(vsi))
3798 continue;
3799 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3800 }
3801
2e5c26ea 3802 /* disable each interrupt */
3803 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3804 for (i = vsi->base_vector;
3805 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3806 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3807
3808 i40e_flush(hw);
3809 for (i = 0; i < vsi->num_q_vectors; i++)
3810 synchronize_irq(pf->msix_entries[i + base].vector);
3811 } else {
3812 /* Legacy and MSI mode - this stops all interrupt handling */
3813 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3814 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3815 i40e_flush(hw);
3816 synchronize_irq(pf->pdev->irq);
3817 }
3818}
3819
3820/**
3821 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3822 * @vsi: the VSI being configured
3823 **/
3824static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3825{
3826 struct i40e_pf *pf = vsi->back;
3827 int i;
3828
3829 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7845548d 3830 for (i = 0; i < vsi->num_q_vectors; i++)
3831 i40e_irq_dynamic_enable(vsi, i);
3832 } else {
dbadbbe2 3833 i40e_irq_dynamic_enable_icr0(pf);
3834 }
3835
1022cb6c 3836 i40e_flush(&pf->hw);
3837 return 0;
3838}
3839
3840/**
c17401a1 3841 * i40e_free_misc_vector - Free the vector that handles non-queue events
3842 * @pf: board private structure
3843 **/
c17401a1 3844static void i40e_free_misc_vector(struct i40e_pf *pf)
3845{
3846 /* Disable ICR 0 */
3847 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3848 i40e_flush(&pf->hw);
3849
3850 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3851 synchronize_irq(pf->msix_entries[0].vector);
3852 free_irq(pf->msix_entries[0].vector, pf);
3853 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3854 }
3855}
3856
3857/**
3858 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3859 * @irq: interrupt number
3860 * @data: pointer to a q_vector
3861 *
3862 * This is the handler used for all MSI/Legacy interrupts, and deals
3863 * with both queue and non-queue interrupts. This is also used in
3864 * MSIX mode to handle the non-queue interrupts.
3865 **/
3866static irqreturn_t i40e_intr(int irq, void *data)
3867{
3868 struct i40e_pf *pf = (struct i40e_pf *)data;
3869 struct i40e_hw *hw = &pf->hw;
5e823066 3870 irqreturn_t ret = IRQ_NONE;
3871 u32 icr0, icr0_remaining;
3872 u32 val, ena_mask;
3873
3874 icr0 = rd32(hw, I40E_PFINT_ICR0);
5e823066 3875 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
41c445ff 3876
3877 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3878 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
5e823066 3879 goto enable_intr;
41c445ff 3880
3881 /* if interrupt but no bits showing, must be SWINT */
3882 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3883 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3884 pf->sw_int_count++;
3885
0d8e1439 3886 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7642984b 3887 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
0d8e1439 3888 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
23bb6dc3 3889 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
7642984b 3890 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3891 }
3892
3893 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3894 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3895 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3896 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
41c445ff 3897
3898 /* We do not have a way to disarm Queue causes while leaving
3899 * interrupt enabled for all other causes, ideally
3900 * interrupt should be disabled while we are in NAPI but
3901 * this is not a performance path and napi_schedule()
3902 * can deal with rescheduling.
3903 */
9e6c9c0f 3904 if (!test_bit(__I40E_DOWN, pf->state))
5d3465a1 3905 napi_schedule_irqoff(&q_vector->napi);
3906 }
3907
3908 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3909 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
0da36b97 3910 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
6e93d0c9 3911 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3912 }
3913
3914 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3915 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
0da36b97 3916 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3917 }
3918
3919 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3920 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
0da36b97 3921 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3922 }
3923
3924 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3925 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3926 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3927 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3928 val = rd32(hw, I40E_GLGEN_RSTAT);
3929 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3930 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4eb3f768 3931 if (val == I40E_RESET_CORER) {
41c445ff 3932 pf->corer_count++;
4eb3f768 3933 } else if (val == I40E_RESET_GLOBR) {
41c445ff 3934 pf->globr_count++;
4eb3f768 3935 } else if (val == I40E_RESET_EMPR) {
41c445ff 3936 pf->empr_count++;
0da36b97 3937 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4eb3f768 3938 }
3939 }
3940
3941 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3942 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3943 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3944 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3945 rd32(hw, I40E_PFHMC_ERRORINFO),
3946 rd32(hw, I40E_PFHMC_ERRORDATA));
3947 }
3948
3949 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3950 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3951
3952 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
cafa1fca 3953 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
beb0dff1 3954 i40e_ptp_tx_hwtstamp(pf);
beb0dff1 3955 }
3956 }
3957
3958 /* If a critical error is pending we have no choice but to reset the
3959 * device.
3960 * Report and mask out any remaining unexpected interrupts.
3961 */
3962 icr0_remaining = icr0 & ena_mask;
3963 if (icr0_remaining) {
3964 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3965 icr0_remaining);
9c010ee0 3966 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
41c445ff 3967 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
c0c28975 3968 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
9c010ee0 3969 dev_info(&pf->pdev->dev, "device will be reset\n");
0da36b97 3970 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9c010ee0 3971 i40e_service_event_schedule(pf);
3972 }
3973 ena_mask &= ~icr0_remaining;
3974 }
5e823066 3975 ret = IRQ_HANDLED;
41c445ff 3976
5e823066 3977enable_intr:
3978 /* re-enable interrupt causes */
3979 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
9e6c9c0f 3980 if (!test_bit(__I40E_DOWN, pf->state)) {
41c445ff 3981 i40e_service_event_schedule(pf);
dbadbbe2 3982 i40e_irq_dynamic_enable_icr0(pf);
3983 }
3984
5e823066 3985 return ret;
3986}
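/* Summary (editor's note): the ICR0 causes handled above defer their real
 * work to the service task -- ADMINQ sets __I40E_ADMINQ_EVENT_PENDING,
 * MAL_DETECT sets __I40E_MDD_EVENT_PENDING, VFLR sets
 * __I40E_VFLR_EVENT_PENDING, and GRST bumps the matching reset counter;
 * any remaining critical cause requests a PF reset before interrupts are
 * re-enabled.
 */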
3987
3988/**
3989 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3990 * @tx_ring: tx ring to clean
3991 * @budget: how many cleans we're allowed
3992 *
3993 * Returns true if there's any budget left (i.e. the clean is finished)
3994 **/
3995static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3996{
3997 struct i40e_vsi *vsi = tx_ring->vsi;
3998 u16 i = tx_ring->next_to_clean;
3999 struct i40e_tx_buffer *tx_buf;
4000 struct i40e_tx_desc *tx_desc;
4001
4002 tx_buf = &tx_ring->tx_bi[i];
4003 tx_desc = I40E_TX_DESC(tx_ring, i);
4004 i -= tx_ring->count;
4005
4006 do {
4007 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4008
4009 /* if next_to_watch is not set then there is no work pending */
4010 if (!eop_desc)
4011 break;
4012
4013 /* prevent any other reads prior to eop_desc */
52c6912f 4014 smp_rmb();
4015
4016 /* if the descriptor isn't done, no work yet to do */
4017 if (!(eop_desc->cmd_type_offset_bsz &
4018 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4019 break;
4020
4021 /* clear next_to_watch to prevent false hangs */
4022 tx_buf->next_to_watch = NULL;
4023
4024 tx_desc->buffer_addr = 0;
4025 tx_desc->cmd_type_offset_bsz = 0;
4026 /* move past filter desc */
4027 tx_buf++;
4028 tx_desc++;
4029 i++;
4030 if (unlikely(!i)) {
4031 i -= tx_ring->count;
4032 tx_buf = tx_ring->tx_bi;
4033 tx_desc = I40E_TX_DESC(tx_ring, 0);
4034 }
4035 /* unmap skb header data */
4036 dma_unmap_single(tx_ring->dev,
4037 dma_unmap_addr(tx_buf, dma),
4038 dma_unmap_len(tx_buf, len),
4039 DMA_TO_DEVICE);
4040 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4041 kfree(tx_buf->raw_buf);
cbf61325 4042
4043 tx_buf->raw_buf = NULL;
4044 tx_buf->tx_flags = 0;
4045 tx_buf->next_to_watch = NULL;
cbf61325 4046 dma_unmap_len_set(tx_buf, len, 0);
4047 tx_desc->buffer_addr = 0;
4048 tx_desc->cmd_type_offset_bsz = 0;
cbf61325 4049
49d7d933 4050 /* move us past the eop_desc for start of next FD desc */
4051 tx_buf++;
4052 tx_desc++;
4053 i++;
4054 if (unlikely(!i)) {
4055 i -= tx_ring->count;
4056 tx_buf = tx_ring->tx_bi;
4057 tx_desc = I40E_TX_DESC(tx_ring, 0);
4058 }
4059
4060 /* update budget accounting */
4061 budget--;
4062 } while (likely(budget));
4063
4064 i += tx_ring->count;
4065 tx_ring->next_to_clean = i;
4066
6995b36c 4067 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
7845548d 4068 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
6995b36c 4069
4070 return budget > 0;
4071}
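/* Worked example (illustrative): the negative-index walk above saves a
 * modulo per descriptor. With tx_ring->count == 512 and
 * next_to_clean == 510, i starts at 510 - 512 = -2; after stepping past
 * two entries i hits 0 and is rewound to -512 (tx_bi[0]), and the final
 * "i += tx_ring->count" restores a valid ring index.
 */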
4072
4073/**
4074 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4075 * @irq: interrupt number
4076 * @data: pointer to a q_vector
4077 **/
4078static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4079{
4080 struct i40e_q_vector *q_vector = data;
4081 struct i40e_vsi *vsi;
4082
4083 if (!q_vector->tx.ring)
4084 return IRQ_HANDLED;
4085
4086 vsi = q_vector->tx.ring->vsi;
4087 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4088
4089 return IRQ_HANDLED;
4090}
4091
41c445ff 4092/**
cd0b6fa6 4093 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4094 * @vsi: the VSI being configured
4095 * @v_idx: vector index
cd0b6fa6 4096 * @qp_idx: queue pair index
41c445ff 4097 **/
26cdc443 4098static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
41c445ff 4099{
493fb300 4100 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4101 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4102 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4103
4104 tx_ring->q_vector = q_vector;
4105 tx_ring->next = q_vector->tx.ring;
4106 q_vector->tx.ring = tx_ring;
41c445ff 4107 q_vector->tx.count++;
cd0b6fa6 4108
4109 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4110 if (i40e_enabled_xdp_vsi(vsi)) {
4111 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4112
4113 xdp_ring->q_vector = q_vector;
4114 xdp_ring->next = q_vector->tx.ring;
4115 q_vector->tx.ring = xdp_ring;
4116 q_vector->tx.count++;
4117 }
4118
4119 rx_ring->q_vector = q_vector;
4120 rx_ring->next = q_vector->rx.ring;
4121 q_vector->rx.ring = rx_ring;
4122 q_vector->rx.count++;
4123}
4124
4125/**
4126 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4127 * @vsi: the VSI being configured
4128 *
4129 * This function maps descriptor rings to the queue-specific vectors
4130 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4131 * one vector per queue pair, but on a constrained vector budget, we
4132 * group the queue pairs as "efficiently" as possible.
4133 **/
4134static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4135{
4136 int qp_remaining = vsi->num_queue_pairs;
4137 int q_vectors = vsi->num_q_vectors;
cd0b6fa6 4138 int num_ringpairs;
4139 int v_start = 0;
4140 int qp_idx = 0;
4141
4142 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4143 * group them so there are multiple queues per vector.
4144 * It is also important to go through all the vectors available to be
4145 * sure that if we don't use all the vectors, that the remaining vectors
4146 * are cleared. This is especially important when decreasing the
4147 * number of queues in use.
41c445ff 4148 */
70114ec4 4149 for (; v_start < q_vectors; v_start++) {
4150 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4151
4152 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4153
4154 q_vector->num_ringpairs = num_ringpairs;
a3f9fb5e 4155 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4156
4157 q_vector->rx.count = 0;
4158 q_vector->tx.count = 0;
4159 q_vector->rx.ring = NULL;
4160 q_vector->tx.ring = NULL;
4161
4162 while (num_ringpairs--) {
26cdc443 4163 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4164 qp_idx++;
4165 qp_remaining--;
4166 }
4167 }
4168}
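/* Worked example (illustrative): with 10 queue pairs spread over 4
 * vectors, the DIV_ROUND_UP() split above assigns 3, 3, 2 and 2
 * ringpairs to vectors 0-3; with 16 pairs over 4 vectors every vector
 * gets exactly 4.
 */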
4169
4170/**
4171 * i40e_vsi_request_irq - Request IRQ from the OS
4172 * @vsi: the VSI being configured
4173 * @basename: name for the vector
4174 **/
4175static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4176{
4177 struct i40e_pf *pf = vsi->back;
4178 int err;
4179
4180 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4181 err = i40e_vsi_request_irq_msix(vsi, basename);
4182 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4183 err = request_irq(pf->pdev->irq, i40e_intr, 0,
b294ac70 4184 pf->int_name, pf);
4185 else
4186 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
b294ac70 4187 pf->int_name, pf);
4188
4189 if (err)
4190 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4191
4192 return err;
4193}
4194
4195#ifdef CONFIG_NET_POLL_CONTROLLER
4196/**
d89d967f 4197 * i40e_netpoll - A Polling 'interrupt' handler
4198 * @netdev: network interface device structure
4199 *
4200 * This is used by netconsole to send skbs without having to re-enable
4201 * interrupts. It's not called while the normal interrupt routine is executing.
4202 **/
4203static void i40e_netpoll(struct net_device *netdev)
4204{
4205 struct i40e_netdev_priv *np = netdev_priv(netdev);
4206 struct i40e_vsi *vsi = np->vsi;
4207 struct i40e_pf *pf = vsi->back;
4208 int i;
4209
4210 /* if interface is down do nothing */
0da36b97 4211 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4212 return;
4213
4214 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4215 for (i = 0; i < vsi->num_q_vectors; i++)
493fb300 4216 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4217 } else {
4218 i40e_intr(pf->pdev->irq, netdev);
4219 }
4220}
4221#endif
4222
4223#define I40E_QTX_ENA_WAIT_COUNT 50
4224
4225/**
4226 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4227 * @pf: the PF being configured
4228 * @pf_q: the PF queue
4229 * @enable: enable or disable state of the queue
4230 *
4231 * This routine will wait for the given Tx queue of the PF to reach the
4232 * enabled or disabled state.
4233 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4234 * multiple retries; else will return 0 in case of success.
4235 **/
4236static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4237{
4238 int i;
4239 u32 tx_reg;
4240
4241 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4242 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4243 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4244 break;
4245
f98a2006 4246 usleep_range(10, 20);
4247 }
4248 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4249 return -ETIMEDOUT;
4250
4251 return 0;
4252}
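/* Illustrative usage sketch (the same pairing is done for real by
 * i40e_control_wait_tx_q() below): flip the queue, then poll QTX_ENA
 * until the status bit matches the request or the retries run out:
 *
 *	i40e_control_tx_q(pf, pf_q, true);
 *	ret = i40e_pf_txq_wait(pf, pf_q, true);
 *	if (ret)
 *		return ret;	(-ETIMEDOUT: never reached enabled state)
 */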
4253
4254/**
4255 * i40e_control_tx_q - Start or stop a particular Tx queue
4256 * @pf: the PF structure
4257 * @pf_q: the PF queue to configure
4258 * @enable: start or stop the queue
4259 *
4260 * This function enables or disables a single queue. Note that any delay
4261 * required after the operation is expected to be handled by the caller of
4262 * this function.
4263 **/
4264static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4265{
4266 struct i40e_hw *hw = &pf->hw;
4267 u32 tx_reg;
4268 int i;
4269
4270 /* warn the TX unit of coming changes */
4271 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4272 if (!enable)
4273 usleep_range(10, 20);
4274
4275 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4276 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4277 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4278 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4279 break;
4280 usleep_range(1000, 2000);
4281 }
4282
4283 /* Skip if the queue is already in the requested state */
4284 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4285 return;
4286
4287 /* turn on/off the queue */
4288 if (enable) {
4289 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4290 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4291 } else {
4292 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4293 }
4294
4295 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4296}
4297
4298/**
4299 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4300 * @seid: VSI SEID
4301 * @pf: the PF structure
4302 * @pf_q: the PF queue to configure
4303 * @is_xdp: true if the queue is used for XDP
4304 * @enable: start or stop the queue
4305 **/
4306int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4307 bool is_xdp, bool enable)
4308{
4309 int ret;
4310
4311 i40e_control_tx_q(pf, pf_q, enable);
4312
4313 /* wait for the change to finish */
4314 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4315 if (ret) {
4316 dev_info(&pf->pdev->dev,
4317 "VSI seid %d %sTx ring %d %sable timeout\n",
4318 seid, (is_xdp ? "XDP " : ""), pf_q,
4319 (enable ? "en" : "dis"));
4320 }
4321
4322 return ret;
4323}
4324
4325/**
4326 * i40e_vsi_control_tx - Start or stop a VSI's rings
4327 * @vsi: the VSI being configured
4328 * @enable: start or stop the rings
4329 **/
4330static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4331{
4332 struct i40e_pf *pf = vsi->back;
c768e490 4333 int i, pf_q, ret = 0;
4334
4335 pf_q = vsi->base_queue;
4336 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4337 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4338 pf_q,
4339 false /*is xdp*/, enable);
4340 if (ret)
4341 break;
351499ab 4342
4343 if (!i40e_enabled_xdp_vsi(vsi))
4344 continue;
4345
4346 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4347 pf_q + vsi->alloc_queue_pairs,
4348 true /*is xdp*/, enable);
4349 if (ret)
23527308 4350 break;
41c445ff 4351 }
4352 return ret;
4353}
4354
4355/**
4356 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4357 * @pf: the PF being configured
4358 * @pf_q: the PF queue
4359 * @enable: enable or disable state of the queue
4360 *
4361 * This routine will wait for the given Rx queue of the PF to reach the
4362 * enabled or disabled state.
4363 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4364 * multiple retries; else will return 0 in case of success.
4365 **/
4366static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4367{
4368 int i;
4369 u32 rx_reg;
4370
4371 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4372 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4373 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4374 break;
4375
f98a2006 4376 usleep_range(10, 20);
4377 }
4378 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4379 return -ETIMEDOUT;
7134f9ce 4380
4381 return 0;
4382}
4383
4384/**
4385 * i40e_control_rx_q - Start or stop a particular Rx queue
4386 * @pf: the PF structure
4387 * @pf_q: the PF queue to configure
4388 * @enable: start or stop the queue
4389 *
4390 * This function enables or disables a single queue. Note that
4391 * any delay required after the operation is expected to be
4392 * handled by the caller of this function.
4393 **/
4394static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4395{
4396 struct i40e_hw *hw = &pf->hw;
4397 u32 rx_reg;
4398 int i;
4399
4400 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4401 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4402 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4403 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4404 break;
4405 usleep_range(1000, 2000);
4406 }
4407
4408 /* Skip if the queue is already in the requested state */
4409 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4410 return;
4411
4412 /* turn on/off the queue */
4413 if (enable)
4414 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4415 else
4416 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4417
4418 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4419}
4420
4421/**
4422 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4423 * @pf: the PF structure
4424 * @pf_q: queue being configured
4425 * @enable: start or stop the rings
4426 *
4427 * This function enables or disables a single queue along with waiting
4428 * for the change to finish. The caller of this function should handle
4429 * the delays needed in the case of disabling queues.
4430 **/
4431int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4432{
4433 int ret;
4434
4435 i40e_control_rx_q(pf, pf_q, enable);
4436
4437 /* wait for the change to finish */
4438 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4439
4440 return ret;
4443}
4444
4445/**
4446 * i40e_vsi_control_rx - Start or stop a VSI's rings
4447 * @vsi: the VSI being configured
4448 * @enable: start or stop the rings
4449 **/
4450static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4451{
4452 struct i40e_pf *pf = vsi->back;
c768e490 4453 int i, pf_q, ret = 0;
4454
4455 pf_q = vsi->base_queue;
4456 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
d0fda04d 4457 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4458 if (ret) {
4459 dev_info(&pf->pdev->dev,
4460 "VSI seid %d Rx ring %d %sable timeout\n",
4461 vsi->seid, pf_q, (enable ? "en" : "dis"));
23527308 4462 break;
4463 }
4464 }
4465
4466 /* Due to HW errata, on Rx disable only, the register can indicate done
4467 * before it really is. Needs 50ms to be sure
4468 */
4469 if (!enable)
4470 mdelay(50);
4471
23527308 4472 return ret;
4473}
4474
4475/**
3aa7b74d 4476 * i40e_vsi_start_rings - Start a VSI's rings
41c445ff 4477 * @vsi: the VSI being configured
41c445ff 4478 **/
3aa7b74d 4479int i40e_vsi_start_rings(struct i40e_vsi *vsi)
41c445ff 4480{
3b867b28 4481 int ret = 0;
4482
4483 /* do rx first for enable and last for disable */
4484 ret = i40e_vsi_control_rx(vsi, true);
4485 if (ret)
4486 return ret;
4487 ret = i40e_vsi_control_tx(vsi, true);
4488
4489 return ret;
4490}
4491
4492/**
4493 * i40e_vsi_stop_rings - Stop a VSI's rings
4494 * @vsi: the VSI being configured
4495 **/
4496void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4497{
3480756f 4498 /* When port TX is suspended, don't wait */
0da36b97 4499 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4500 return i40e_vsi_stop_rings_no_wait(vsi);
4501
4502 /* Rx is started first on enable and stopped last on disable.
4503 * Ignore return values here; we need to shut down whatever we can.
4504 */
4505 i40e_vsi_control_tx(vsi, false);
4506 i40e_vsi_control_rx(vsi, false);
4507}
4508
4509/**
4510 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4511 * @vsi: the VSI being shutdown
4512 *
4513 * This function stops all the rings for a VSI but does not delay to verify
4514 * that rings have been disabled. It is expected that the caller is shutting
4515 * down multiple VSIs at once and will delay together for all the VSIs after
4516 * initiating the shutdown. This is particularly useful for shutting down lots
4517 * of VFs together. Otherwise, a large delay can be incurred while configuring
4518 * each VSI in serial.
4519 **/
4520void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4521{
4522 struct i40e_pf *pf = vsi->back;
4523 int i, pf_q;
4524
4525 pf_q = vsi->base_queue;
4526 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4527 i40e_control_tx_q(pf, pf_q, false);
4528 i40e_control_rx_q(pf, pf_q, false);
4529 }
4530}
4531
4532/**
4533 * i40e_vsi_free_irq - Free the irq association with the OS
4534 * @vsi: the VSI being configured
4535 **/
4536static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4537{
4538 struct i40e_pf *pf = vsi->back;
4539 struct i40e_hw *hw = &pf->hw;
4540 int base = vsi->base_vector;
4541 u32 val, qp;
4542 int i;
4543
4544 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4545 if (!vsi->q_vectors)
4546 return;
4547
4548 if (!vsi->irqs_ready)
4549 return;
4550
4551 vsi->irqs_ready = false;
41c445ff 4552 for (i = 0; i < vsi->num_q_vectors; i++) {
4553 int irq_num;
4554 u16 vector;
4555
4556 vector = i + base;
4557 irq_num = pf->msix_entries[vector].vector;
4558
4559 /* free only the irqs that were actually requested */
4560 if (!vsi->q_vectors[i] ||
4561 !vsi->q_vectors[i]->num_ringpairs)
4562 continue;
4563
4564 /* clear the affinity notifier in the IRQ descriptor */
4565 irq_set_affinity_notifier(irq_num, NULL);
759dc4a7 4566 /* remove our suggested affinity mask for this IRQ */
4567 irq_set_affinity_hint(irq_num, NULL);
4568 synchronize_irq(irq_num);
4569 free_irq(irq_num, vsi->q_vectors[i]);
4570
4571 /* Tear down the interrupt queue link list
4572 *
4573 * We know that they come in pairs and always
4574 * the Rx first, then the Tx. To clear the
4575 * link list, stick the EOL value into the
4576 * next_q field of the registers.
4577 */
4578 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4579 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4580 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4581 val |= I40E_QUEUE_END_OF_LIST
4582 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4583 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4584
4585 while (qp != I40E_QUEUE_END_OF_LIST) {
4586 u32 next;
4587
4588 val = rd32(hw, I40E_QINT_RQCTL(qp));
4589
4590 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4591 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4592 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4593 I40E_QINT_RQCTL_INTEVENT_MASK);
4594
4595 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4596 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4597
4598 wr32(hw, I40E_QINT_RQCTL(qp), val);
4599
4600 val = rd32(hw, I40E_QINT_TQCTL(qp));
4601
4602 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4603 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4604
4605 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4606 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4607 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4608 I40E_QINT_TQCTL_INTEVENT_MASK);
4609
4610 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4611 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4612
4613 wr32(hw, I40E_QINT_TQCTL(qp), val);
4614 qp = next;
4615 }
4616 }
4617 } else {
4618 free_irq(pf->pdev->irq, pf);
4619
4620 val = rd32(hw, I40E_PFINT_LNKLST0);
4621 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4622 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4623 val |= I40E_QUEUE_END_OF_LIST
4624 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4625 wr32(hw, I40E_PFINT_LNKLST0, val);
4626
4627 val = rd32(hw, I40E_QINT_RQCTL(qp));
4628 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4629 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4630 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4631 I40E_QINT_RQCTL_INTEVENT_MASK);
4632
4633 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4634 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4635
4636 wr32(hw, I40E_QINT_RQCTL(qp), val);
4637
4638 val = rd32(hw, I40E_QINT_TQCTL(qp));
4639
4640 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4641 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4642 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4643 I40E_QINT_TQCTL_INTEVENT_MASK);
4644
4645 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4646 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4647
4648 wr32(hw, I40E_QINT_TQCTL(qp), val);
4649 }
4650}
4651
4652/**
4653 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4654 * @vsi: the VSI being configured
4655 * @v_idx: Index of vector to be freed
4656 *
4657 * This function frees the memory allocated to the q_vector. In addition if
4658 * NAPI is enabled it will delete any references to the NAPI struct prior
4659 * to freeing the q_vector.
4660 **/
4661static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4662{
4663 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
cd0b6fa6 4664 struct i40e_ring *ring;
4665
4666 if (!q_vector)
4667 return;
4668
4669 /* disassociate q_vector from rings */
4670 i40e_for_each_ring(ring, q_vector->tx)
4671 ring->q_vector = NULL;
4672
4673 i40e_for_each_ring(ring, q_vector->rx)
4674 ring->q_vector = NULL;
4675
4676 /* only VSI w/ an associated netdev is set up w/ NAPI */
4677 if (vsi->netdev)
4678 netif_napi_del(&q_vector->napi);
4679
4680 vsi->q_vectors[v_idx] = NULL;
4681
4682 kfree_rcu(q_vector, rcu);
4683}
4684
4685/**
4686 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4687 * @vsi: the VSI being un-configured
4688 *
4689 * This frees the memory allocated to the q_vectors and
4690 * deletes references to the NAPI struct.
4691 **/
4692static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4693{
4694 int v_idx;
4695
4696 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4697 i40e_free_q_vector(vsi, v_idx);
4698}
4699
4700/**
4701 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4702 * @pf: board private structure
4703 **/
4704static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4705{
4706 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4707 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4708 pci_disable_msix(pf->pdev);
4709 kfree(pf->msix_entries);
4710 pf->msix_entries = NULL;
4711 kfree(pf->irq_pile);
4712 pf->irq_pile = NULL;
4713 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4714 pci_disable_msi(pf->pdev);
4715 }
4716 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4717}
4718
4719/**
4720 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4721 * @pf: board private structure
4722 *
4723 * We go through and clear interrupt specific resources and reset the structure
4724 * to pre-load conditions
4725 **/
4726static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4727{
4728 int i;
4729
c17401a1 4730 i40e_free_misc_vector(pf);
e147758d 4731
4732 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4733 I40E_IWARP_IRQ_PILE_ID);
4734
41c445ff 4735 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
505682cd 4736 for (i = 0; i < pf->num_alloc_vsi; i++)
4737 if (pf->vsi[i])
4738 i40e_vsi_free_q_vectors(pf->vsi[i]);
4739 i40e_reset_interrupt_capability(pf);
4740}
4741
4742/**
4743 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4744 * @vsi: the VSI being configured
4745 **/
4746static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4747{
4748 int q_idx;
4749
4750 if (!vsi->netdev)
4751 return;
4752
4753 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4754 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4755
4756 if (q_vector->rx.ring || q_vector->tx.ring)
4757 napi_enable(&q_vector->napi);
4758 }
4759}
4760
4761/**
4762 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4763 * @vsi: the VSI being configured
4764 **/
4765static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4766{
4767 int q_idx;
4768
4769 if (!vsi->netdev)
4770 return;
4771
4772 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4773 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4774
4775 if (q_vector->rx.ring || q_vector->tx.ring)
4776 napi_disable(&q_vector->napi);
4777 }
4778}
4779
4780/**
4781 * i40e_vsi_close - Shut down a VSI
4782 * @vsi: the vsi to be quelled
4783 **/
4784static void i40e_vsi_close(struct i40e_vsi *vsi)
4785{
0ef2d5af 4786 struct i40e_pf *pf = vsi->back;
0da36b97 4787 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4788 i40e_down(vsi);
4789 i40e_vsi_free_irq(vsi);
4790 i40e_vsi_free_tx_resources(vsi);
4791 i40e_vsi_free_rx_resources(vsi);
92faef85 4792 vsi->current_netdev_flags = 0;
5f76a704 4793 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
0da36b97 4794 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5f76a704 4795 set_bit(__I40E_CLIENT_RESET, pf->state);
4796}
4797
4798/**
4799 * i40e_quiesce_vsi - Pause a given VSI
4800 * @vsi: the VSI being paused
4801 **/
4802static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4803{
0da36b97 4804 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4805 return;
4806
0da36b97 4807 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
6995b36c 4808 if (vsi->netdev && netif_running(vsi->netdev))
41c445ff 4809 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
6995b36c 4810 else
90ef8d47 4811 i40e_vsi_close(vsi);
4812}
4813
4814/**
4815 * i40e_unquiesce_vsi - Resume a given VSI
4816 * @vsi: the VSI being resumed
4817 **/
4818static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4819{
0da36b97 4820 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4821 return;
4822
4823 if (vsi->netdev && netif_running(vsi->netdev))
4824 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4825 else
8276f757 4826 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4827}
4828
4829/**
4830 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4831 * @pf: the PF
4832 **/
4833static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4834{
4835 int v;
4836
505682cd 4837 for (v = 0; v < pf->num_alloc_vsi; v++) {
4838 if (pf->vsi[v])
4839 i40e_quiesce_vsi(pf->vsi[v]);
4840 }
4841}
4842
4843/**
4844 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4845 * @pf: the PF
4846 **/
4847static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4848{
4849 int v;
4850
505682cd 4851 for (v = 0; v < pf->num_alloc_vsi; v++) {
4852 if (pf->vsi[v])
4853 i40e_unquiesce_vsi(pf->vsi[v]);
4854 }
4855}
4856
69129dc3 4857/**
3fe06f41 4858 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4859 * @vsi: the VSI being configured
4860 *
af26ce2d 4861 * Wait until all queues on a given VSI have been disabled.
69129dc3 4862 **/
e4b433f4 4863int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4864{
4865 struct i40e_pf *pf = vsi->back;
4866 int i, pf_q, ret;
4867
4868 pf_q = vsi->base_queue;
4869 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
af26ce2d 4870 /* Check and wait for the Tx queue */
4871 ret = i40e_pf_txq_wait(pf, pf_q, false);
4872 if (ret) {
4873 dev_info(&pf->pdev->dev,
4874 "VSI seid %d Tx ring %d disable timeout\n",
4875 vsi->seid, pf_q);
4876 return ret;
4877 }
4878
4879 if (!i40e_enabled_xdp_vsi(vsi))
4880 goto wait_rx;
4881
4882 /* Check and wait for the XDP Tx queue */
4883 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4884 false);
4885 if (ret) {
4886 dev_info(&pf->pdev->dev,
4887 "VSI seid %d XDP Tx ring %d disable timeout\n",
4888 vsi->seid, pf_q);
4889 return ret;
4890 }
4891wait_rx:
4892 /* Check and wait for the Rx queue */
4893 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4894 if (ret) {
4895 dev_info(&pf->pdev->dev,
4896 "VSI seid %d Rx ring %d disable timeout\n",
4897 vsi->seid, pf_q);
4898 return ret;
4899 }
4900 }
4901
4902 return 0;
4903}
4904
e4b433f4 4905#ifdef CONFIG_I40E_DCB
69129dc3 4906/**
3fe06f41 4907 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4908 * @pf: the PF
4909 *
3fe06f41 4910 * This function waits for the queues to be in disabled state for all the
4911 * VSIs that are managed by this PF.
4912 **/
3fe06f41 4913static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
69129dc3
NP
4914{
4915 int v, ret = 0;
4916
4917 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
c76cb6ed 4918 if (pf->vsi[v]) {
3fe06f41 4919 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4920 if (ret)
4921 break;
4922 }
4923 }
4924
4925 return ret;
4926}
4927
4928#endif
b03a8c1f 4929
4930/**
4931 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
b40c82e6 4932 * @pf: pointer to PF
4933 *
4934 * Get TC map for ISCSI PF type that will include iSCSI TC
4935 * and LAN TC.
4936 **/
4937static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4938{
4939 struct i40e_dcb_app_priority_table app;
4940 struct i40e_hw *hw = &pf->hw;
4941 u8 enabled_tc = 1; /* TC0 is always enabled */
4942 u8 tc, i;
4943 /* Get the iSCSI APP TLV */
4944 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4945
4946 for (i = 0; i < dcbcfg->numapps; i++) {
4947 app = dcbcfg->app[i];
4948 if (app.selector == I40E_APP_SEL_TCPIP &&
4949 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4950 tc = dcbcfg->etscfg.prioritytable[app.priority];
75f5cea9 4951 enabled_tc |= BIT(tc);
4952 break;
4953 }
4954 }
4955
4956 return enabled_tc;
4957}
4958
4959/**
4960 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4961 * @dcbcfg: the corresponding DCBx configuration structure
4962 *
4963 * Return the number of TCs from given DCBx configuration
4964 **/
4965static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4966{
fbfe12c6 4967 int i, tc_unused = 0;
078b5876 4968 u8 num_tc = 0;
fbfe12c6 4969 u8 ret = 0;
4970
4971 /* Scan the ETS Config Priority Table to find
4972 * traffic class enabled for a given priority
fbfe12c6 4973 * and create a bitmask of enabled TCs
41c445ff 4974 */
fbfe12c6
DE
4975 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4976 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
41c445ff 4977
fbfe12c6
DE
4978 /* Now scan the bitmask to check for
4979 * contiguous TCs starting with TC0
41c445ff 4980 */
fbfe12c6
DE
4981 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4982 if (num_tc & BIT(i)) {
4983 if (!tc_unused) {
4984 ret++;
4985 } else {
4986 pr_err("Non-contiguous TC - Disabling DCB\n");
4987 return 1;
4988 }
4989 } else {
4990 tc_unused = 1;
4991 }
4992 }
4993
4994 /* There is always at least TC0 */
4995 if (!ret)
4996 ret = 1;
4997
4998 return ret;
41c445ff
JB
4999}
5000
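/*
 * The second scan above only counts traffic classes that form a contiguous
 * run starting at TC0. A minimal sketch of that rule with assumed bitmaps
 * (not tied to any particular DCBX configuration):
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static u8 example_count_contiguous_tcs(u8 tc_bitmask)
{
	u8 i, count = 0, tc_unused = 0;

	for (i = 0; i < 8; i++) {
		if (tc_bitmask & (1 << i)) {
			if (tc_unused)
				return 1;	/* gap seen: fall back to one TC */
			count++;
		} else {
			tc_unused = 1;
		}
	}
	return count ? count : 1;
	/* 0x07 (TC0-TC2) -> 3; 0x05 (TC0, TC2) -> 1, non-contiguous */
}
#endif
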
/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the given DCBx configuration and return a bitmap of the
 * traffic classes it enables.
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}

/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return a bitmap of the
 * enabled traffic classes.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}

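/*
 * tc_bw_max packs one 4-bit field per TC across two little-endian u16
 * words, and only the low 3 bits of each field are meaningful ("3 bits
 * out of 4" above). A sketch of the unpacking with an assumed AQ value:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static void example_unpack_tc_bw_max(void)
{
	u32 tc_bw_max = 0x3210 | (0x7654 << 16);	/* assumed response */
	u8 max_quanta[8];
	int i;

	for (i = 0; i < 8; i++)
		max_quanta[i] = (tc_bw_max >> (i * 4)) & 0x7;
	/* max_quanta = { 0, 1, 2, 3, 4, 5, 6, 7 } */
}
#endif
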
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;
	int i;

	/* There is no need to reset BW when mqprio mode is on. */
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x9 (binary 1001); the
		 * driver will set the numtc for netdev as 2, which will
		 * be referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_config.tc_info[i].netdev_tc,
					    vsi->tc_config.tc_info[i].qcount,
					    vsi->tc_config.tc_info[i].qoffset);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

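/*
 * How the loop above lays out netdev TCs for a sparse HW TC map, as a
 * sketch with assumed queue counts: enabled_tc = 0x9 (TC0 and TC3) ends
 * up as two netdev TCs with back-to-back queue ranges.
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static void example_netdev_tc_layout(struct net_device *netdev)
{
	u8 enabled_tc = 0x9;		/* assumed: HW TC0 and TC3 */
	u16 qcount = 4, offset = 0;	/* assumed queues per TC */
	u8 netdev_tc = 0;
	int i;

	netdev_set_num_tc(netdev, 2);
	for (i = 0; i < 8; i++) {
		if (!(enabled_tc & (1 << i)))
			continue;
		/* HW TC0 -> netdev TC0 @ 0-3, HW TC3 -> netdev TC1 @ 4-7 */
		netdev_set_tc_queue(netdev, netdev_tc++, qcount, offset);
		offset += qcount;
	}
}
#endif
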
/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};

		dev_info(&pf->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
						  &bw_config, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed querying vsi bw info, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			goto out;
		}
		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;

			if (!valid_tc)
				valid_tc = bw_config.tc_valid_bits;
			/* Always enable TC0, no matter what */
			valid_tc |= 1;
			dev_info(&pf->pdev->dev,
				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
			enabled_tc = valid_tc;
		}

		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
				enabled_tc, vsi->seid);
			goto out;
		}
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 *
 **/
static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}

/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < 50) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}

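/*
 * Tx rate limits are programmed in credits of 50 Mbps each
 * (I40E_BW_CREDIT_DIVISOR above). A quick sketch of the conversion with
 * an assumed requested rate:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static u64 example_rate_to_credits(void)
{
	u64 max_tx_rate = 975;	/* Mbps, assumed */

	/* non-zero rates below 50 Mbps are bumped up to the minimum */
	if (max_tx_rate && max_tx_rate < 50)
		max_tx_rate = 50;

	return max_tx_rate / 50;	/* 975 Mbps -> 19 credits (~950 Mbps) */
}
#endif
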
/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}
		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}

/**
 * i40e_is_any_channel - check if any channel exists
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true if at least one initialized channel exists for the
 * associated VSI, false otherwise.
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}

/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}

/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates whether RSS should be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;
	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* If channels exist, find the max num_queues configured on
		 * any of them and enforce 'num_queues' to be at least that
		 * value.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}

/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}

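/*
 * i40e_fill_rss_lut() spreads the new rss_size over the whole LUT
 * round-robin, so traffic only lands on the first rss_size queues. A
 * sketch of the equivalent arithmetic with assumed sizes:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static void example_fill_rss_lut(void)
{
	u8 lut[512];		/* assumed rss_table_size */
	u16 rss_size = 6;	/* assumed new rss_size */
	int i;

	for (i = 0; i < 512; i++)
		lut[i] = i % rss_size;	/* 0,1,2,3,4,5,0,1,... */
}
#endif
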
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

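/*
 * The queue count is encoded in the TC queue map as a power-of-2
 * exponent, so a non-power-of-2 count is rounded up to the next power
 * of two. Sketch of that rounding with an assumed count:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static int example_qcount_to_pow(void)
{
	int qcount = 6;	/* assumed channel queue count */
	int pow = 0;

	/* smallest exponent with 2^pow >= qcount: 6 -> 2^3 = 8 */
	while ((1 << pow) < qcount)
		pow++;
	return pow;	/* 3 */
}
#endif
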
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}

static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since queues are being
 * taken from the parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	i40e_status ret;
	int i;
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}

/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}

/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the parent VSI of the channel
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}

/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up the switch mode if it needs to be changed, restricting it
 * to the modes that are allowed.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return -EINVAL;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;
		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type for TCP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %s aq_err %s\n",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}

/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode; if this is the first VF/VMDq
	 * VSI to be added, switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail should be non-zero and at least
	 * as large as num_queues.
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate,
			credits,
			ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}

/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}

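/*
 * The tc interface hands max_rate over in bytes per second, while the
 * firmware takes Mbit/s; I40E_BW_MBPS_DIVISOR (125000 bytes/s per
 * Mbit/s) does the conversion above. A sketch with an assumed offload
 * value:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static u64 example_bytes_to_mbps(void)
{
	/* a 100 Mbit/s tc rate arrives as 12500000 bytes/s */
	u64 max_rate = 12500000;

	return max_rate / 125000;	/* 100 Mbit/s */
}
#endif
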
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller will have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
	 * Also do not enable DCBx if FW LLDP agent is disabled
	 */
	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	if (isup)
		new_speed = pf->hw.phy.link_info.link_speed;
	else
		new_speed = I40E_LINK_SPEED_UNKNOWN;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	i40e_service_event_schedule(pf);

	return 0;
}

6576
6577/**
6578 * i40e_vsi_reinit_locked - Reset the VSI
6579 * @vsi: the VSI being configured
6580 *
6581 * Rebuild the ring structs after some configuration
6582 * has changed, e.g. MTU size.
6583 **/
6584static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6585{
6586 struct i40e_pf *pf = vsi->back;
6587
6588 WARN_ON(in_interrupt());
0da36b97 6589 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
41c445ff
JB
6590 usleep_range(1000, 2000);
6591 i40e_down(vsi);
6592
41c445ff 6593 i40e_up(vsi);
0da36b97 6594 clear_bit(__I40E_CONFIG_BUSY, pf->state);
41c445ff
JB
6595}
6596
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_force_link_state - Force the link status
 * @pf: board private structure
 * @is_up: whether the link state should be forced up or down
 **/
static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config = {0};
	struct i40e_hw *hw = &pf->hw;
	i40e_status err;
	u64 mask;
	u8 speed;

	/* Card might've been put in an unstable state by other drivers
	 * and applications, which causes incorrect speed values to be
	 * set on startup. In order to clear speed registers, we call
	 * get_phy_capabilities twice, once to get initial state of
	 * available speeds, and once to get current PHY config.
	 */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %s last_status = %s\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}
	speed = abilities.link_speed;

	/* Get the current phy config */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %s last_status = %s\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}

	/* If link needs to go up, but was not forced to go down,
	 * and its speed values are OK, no need for a flap
	 */
	if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
		return I40E_SUCCESS;

	/* To force link we need to set bits for all supported PHY types,
	 * but there are now more than 32, so we need to split the bitmap
	 * across two fields.
	 */
	mask = I40E_PHY_TYPES_BITMASK;
	config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
	config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
	/* Copy the old settings, except of phy_type */
	config.abilities = abilities.abilities;
	if (abilities.link_speed != 0)
		config.link_speed = abilities.link_speed;
	else
		config.link_speed = speed;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;
	err = i40e_aq_set_phy_config(hw, &config, NULL);

	if (err) {
		dev_err(&pf->pdev->dev,
			"set phy config ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return err;
	}

	/* Update the link info */
	err = i40e_update_link_info(hw);
	if (err) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		i40e_update_link_info(hw);
	}

	i40e_aq_set_link_restart_an(hw, true, NULL);

	return I40E_SUCCESS;
}

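/*
 * Because there are now more than 32 PHY types, the 64-bit mask above is
 * split between the 32-bit phy_type field and the 8-bit phy_type_ext
 * field. A sketch of the split with an assumed bitmap:
 */
#if 0	/* illustrative sketch -- not part of the driver build */
static void example_split_phy_mask(void)
{
	u64 mask = 0x1ffffffffffULL;	/* assumed PHY type bitmap */
	u32 phy_type = (u32)(mask & 0xffffffff);	/* 0xffffffff */
	u8 phy_type_ext = (u8)((mask >> 32) & 0xff);	/* 0xff */

	(void)phy_type;
	(void)phy_type_ext;
}
#endif
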
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	if (vsi->type == I40E_VSI_MAIN &&
	    vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
		i40e_force_link_state(vsi->back, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}

}

a9ce82f7
AN
6735/**
6736 * i40e_validate_mqprio_qopt- validate queue mapping info
6737 * @vsi: the VSI being configured
6738 * @mqprio_qopt: queue parametrs
6739 **/
6740static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6741 struct tc_mqprio_qopt_offload *mqprio_qopt)
6742{
2027d4de 6743 u64 sum_max_rate = 0;
6c32e0d9 6744 u64 max_rate = 0;
a9ce82f7
AN
6745 int i;
6746
6747 if (mqprio_qopt->qopt.offset[0] != 0 ||
6748 mqprio_qopt->qopt.num_tc < 1 ||
6749 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6750 return -EINVAL;
6751 for (i = 0; ; i++) {
6752 if (!mqprio_qopt->qopt.count[i])
6753 return -EINVAL;
2027d4de
AN
6754 if (mqprio_qopt->min_rate[i]) {
6755 dev_err(&vsi->back->pdev->dev,
6756 "Invalid min tx rate (greater than 0) specified\n");
a9ce82f7 6757 return -EINVAL;
2027d4de 6758 }
6c32e0d9
AB
6759 max_rate = mqprio_qopt->max_rate[i];
6760 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6761 sum_max_rate += max_rate;
2027d4de 6762
a9ce82f7
AN
6763 if (i >= mqprio_qopt->qopt.num_tc - 1)
6764 break;
6765 if (mqprio_qopt->qopt.offset[i + 1] !=
6766 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6767 return -EINVAL;
6768 }
6769 if (vsi->num_queue_pairs <
6770 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6771 return -EINVAL;
6772 }
6773 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6774 dev_err(&vsi->back->pdev->dev,
6775 "Invalid max tx rate specified\n");
6776 return -EINVAL;
6777 }
6778 return 0;
6779}
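/* Worked example (illustrative): on a VSI with 8 queue pairs, a valid
 * 2-TC request is num_tc = 2, count = {4, 4}, offset = {0, 4}: the
 * ranges are contiguous (offset[1] == offset[0] + count[0]) and end
 * within vsi->num_queue_pairs. Assuming I40E_BW_MBPS_DIVISOR is the
 * bytes/s-to-Mbps divisor (125000), a max_rate[i] of 500000000 bytes/s
 * contributes 4000 Mbps to sum_max_rate, which must stay at or below
 * the link speed.
 */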
6780
6781/**
6782 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6783 * @vsi: the VSI being configured
6784 **/
6785static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6786{
6787 u16 qcount;
6788 int i;
6789
6790 /* Only TC0 is enabled */
6791 vsi->tc_config.numtc = 1;
6792 vsi->tc_config.enabled_tc = 1;
6793 qcount = min_t(int, vsi->alloc_queue_pairs,
6794 i40e_pf_get_max_q_per_tc(vsi->back));
6795 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 6796		/* For the TC that is not enabled set the offset to the default
6797 * queue and allocate one queue for the given TC.
6798 */
6799 vsi->tc_config.tc_info[i].qoffset = 0;
6800 if (i == 0)
6801 vsi->tc_config.tc_info[i].qcount = qcount;
6802 else
6803 vsi->tc_config.tc_info[i].qcount = 1;
6804 vsi->tc_config.tc_info[i].netdev_tc = 0;
6805 }
6806}
6807
6808/**
6809 * i40e_setup_tc - configure multiple traffic classes
6810 * @netdev: net device to configure
 6811 * @type_data: tc offload data
 6812 **/
 6813static int i40e_setup_tc(struct net_device *netdev, void *type_data)
 6814{
 6815	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6816 struct i40e_netdev_priv *np = netdev_priv(netdev);
6817 struct i40e_vsi *vsi = np->vsi;
6818 struct i40e_pf *pf = vsi->back;
6819 u8 enabled_tc = 0, num_tc, hw;
6820 bool need_reset = false;
 6821	int ret = -EINVAL;
 6822	u16 mode;
6823 int i;
6824
6825 num_tc = mqprio_qopt->qopt.num_tc;
6826 hw = mqprio_qopt->qopt.hw;
6827 mode = mqprio_qopt->mode;
6828 if (!hw) {
6829 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6830 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6831 goto config_tc;
6832 }
6833
6834 /* Check if MFP enabled */
6835 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6836 netdev_info(netdev,
6837 "Configuring TC not supported in MFP mode\n");
6838 return ret;
 6839	}
6840 switch (mode) {
6841 case TC_MQPRIO_MODE_DCB:
6842 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
 6843
6844 /* Check if DCB enabled to continue */
6845 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6846 netdev_info(netdev,
6847 "DCB is not enabled for adapter\n");
6848 return ret;
6849 }
6850
6851 /* Check whether tc count is within enabled limit */
6852 if (num_tc > i40e_pf_get_num_tc(pf)) {
6853 netdev_info(netdev,
6854 "TC count greater than enabled on link for adapter\n");
6855 return ret;
6856 }
6857 break;
6858 case TC_MQPRIO_MODE_CHANNEL:
6859 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6860 netdev_info(netdev,
6861 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6862 return ret;
6863 }
6864 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6865 return ret;
6866 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6867 if (ret)
6868 return ret;
6869 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6870 sizeof(*mqprio_qopt));
6871 pf->flags |= I40E_FLAG_TC_MQPRIO;
6872 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6873 break;
6874 default:
6875 return -EINVAL;
6876 }
6877
 6878config_tc:
 6879	/* Generate TC map for number of tc requested */
 6880	for (i = 0; i < num_tc; i++)
 6881		enabled_tc |= BIT(i);
6882
6883 /* Requesting same TC configuration as already enabled */
6884 if (enabled_tc == vsi->tc_config.enabled_tc &&
6885 mode != TC_MQPRIO_MODE_CHANNEL)
6886 return 0;
6887
6888 /* Quiesce VSI queues */
6889 i40e_quiesce_vsi(vsi);
6890
6891 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6892 i40e_remove_queue_channels(vsi);
6893
6894 /* Configure VSI for enabled TCs */
6895 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6896 if (ret) {
6897 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6898 vsi->seid);
 6899		need_reset = true;
6900 goto exit;
6901 }
6902
 6903	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
 6904		if (vsi->mqprio_qopt.max_rate[0]) {
6905 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6906
6907 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6908 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6909 if (!ret) {
6910 u64 credits = max_tx_rate;
6911
6912 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6913 dev_dbg(&vsi->back->pdev->dev,
6914 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6915 max_tx_rate,
 6916				credits,
6917 vsi->seid);
6918 } else {
6919 need_reset = true;
6920 goto exit;
6921 }
6922 }
 6923		ret = i40e_configure_queue_channels(vsi);
 6924		if (ret) {
6925 netdev_info(netdev,
6926 "Failed configuring queue channels\n");
 6927			need_reset = true;
 6928			goto exit;
6929 }
6930 }
6931
 6932exit:
6933 /* Reset the configuration data to defaults, only TC0 is enabled */
6934 if (need_reset) {
6935 i40e_vsi_set_default_tc_config(vsi);
6936 need_reset = false;
6937 }
 6938
6939 /* Unquiesce VSI */
6940 i40e_unquiesce_vsi(vsi);
6941 return ret;
6942}
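/* Userspace example (illustrative, iproute2 syntax): a channel-mode
 * mqprio request that takes the TC_MQPRIO_MODE_CHANNEL path above:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel
 */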
 6943
6944/**
6945 * i40e_set_cld_element - sets cloud filter element data
6946 * @filter: cloud filter rule
6947 * @cld: ptr to cloud filter element data
6948 *
 6949 * This is a helper function to copy data into the cloud filter element
6950 **/
6951static inline void
6952i40e_set_cld_element(struct i40e_cloud_filter *filter,
6953 struct i40e_aqc_cloud_filters_element_data *cld)
6954{
6955 int i, j;
6956 u32 ipa;
6957
6958 memset(cld, 0, sizeof(*cld));
6959 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6960 ether_addr_copy(cld->inner_mac, filter->src_mac);
6961
6962 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6963 return;
6964
6965 if (filter->n_proto == ETH_P_IPV6) {
6966#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
6967 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6968 i++, j += 2) {
6969 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6970 ipa = cpu_to_le32(ipa);
6971 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
 6972		}
6973 } else {
6974 ipa = be32_to_cpu(filter->dst_ipv4);
6975 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
 6976	}
6977
6978 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
6979
 6980	/* tenant_id is not supported by FW now; once the support is enabled,
 6981	 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
 6982	 */
6983 if (filter->tenant_id)
6984 return;
6985}
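/* Illustrative note (an assumption about the firmware layout): the IPv6
 * loop above writes the destination address into raw_v6.data with its
 * 32-bit words reversed, so the last word of dst_ipv6 lands at the
 * start of the buffer, each word's byte order adjusted via
 * be32_to_cpu()/cpu_to_le32().
 */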
6986
 6987/**
6988 * i40e_add_del_cloud_filter - Add/del cloud filter
6989 * @vsi: pointer to VSI
6990 * @filter: cloud filter rule
6991 * @add: if true, add, if false, delete
 6992 *
6993 * Add or delete a cloud filter for a specific flow spec.
 6994 * Returns 0 if the filter was successfully added.
 6995 **/
6996int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
6997 struct i40e_cloud_filter *filter, bool add)
 6998{
6999 struct i40e_aqc_cloud_filters_element_data cld_filter;
7000 struct i40e_pf *pf = vsi->back;
 7001	int ret;
7002 static const u16 flag_table[128] = {
7003 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7004 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7005 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7006 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7007 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7008 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7009 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7010 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7011 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7012 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7013 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7014 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7015 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7016 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7017 };
7018
7019 if (filter->flags >= ARRAY_SIZE(flag_table))
7020 return I40E_ERR_CONFIG;
7021
7022 /* copy element needed to add cloud filter from filter */
7023 i40e_set_cld_element(filter, &cld_filter);
7024
7025 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7026 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7027 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7028
7029 if (filter->n_proto == ETH_P_IPV6)
7030 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7031 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7032 else
7033 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7034 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
 7035
7036 if (add)
7037 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7038 &cld_filter, 1);
7039 else
7040 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7041 &cld_filter, 1);
7042 if (ret)
7043 dev_dbg(&pf->pdev->dev,
7044 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7045 add ? "add" : "delete", filter->dst_port, ret,
7046 pf->hw.aq.asq_last_status);
7047 else
 7048		dev_info(&pf->pdev->dev,
7049 "%s cloud filter for VSI: %d\n",
7050 add ? "Added" : "Deleted", filter->seid);
7051 return ret;
7052}
7053
 7054/**
7055 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7056 * @vsi: pointer to VSI
7057 * @filter: cloud filter rule
7058 * @add: if true, add, if false, delete
 7059 *
7060 * Add or delete a cloud filter for a specific flow spec using big buffer.
 7061 * Returns 0 if the filter was successfully added.
 7062 **/
7063int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7064 struct i40e_cloud_filter *filter,
7065 bool add)
 7066{
7067 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7068 struct i40e_pf *pf = vsi->back;
7069 int ret;
 7070
7071 /* Both (src/dst) valid mac_addr are not supported */
7072 if ((is_valid_ether_addr(filter->dst_mac) &&
7073 is_valid_ether_addr(filter->src_mac)) ||
7074 (is_multicast_ether_addr(filter->dst_mac) &&
7075 is_multicast_ether_addr(filter->src_mac)))
 7076		return -EOPNOTSUPP;
 7077
7078 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7079 * ports are not supported via big buffer now.
 7080	 */
7081 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7082 return -EOPNOTSUPP;
 7083
7084 /* adding filter using src_port/src_ip is not supported at this stage */
7085 if (filter->src_port || filter->src_ipv4 ||
7086 !ipv6_addr_any(&filter->ip.v6.src_ip6))
 7087		return -EOPNOTSUPP;
7088
7089 /* copy element needed to add cloud filter from filter */
7090 i40e_set_cld_element(filter, &cld_filter.element);
7091
7092 if (is_valid_ether_addr(filter->dst_mac) ||
7093 is_valid_ether_addr(filter->src_mac) ||
7094 is_multicast_ether_addr(filter->dst_mac) ||
7095 is_multicast_ether_addr(filter->src_mac)) {
7096 /* MAC + IP : unsupported mode */
7097 if (filter->dst_ipv4)
 7098			return -EOPNOTSUPP;
7099
7100 /* since we validated that L4 port must be valid before
7101 * we get here, start with respective "flags" value
7102 * and update if vlan is present or not
7103 */
7104 cld_filter.element.flags =
7105 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7106
7107 if (filter->vlan_id) {
7108 cld_filter.element.flags =
7109 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
 7110		}
7111
7112 } else if (filter->dst_ipv4 ||
7113 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7114 cld_filter.element.flags =
7115 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7116 if (filter->n_proto == ETH_P_IPV6)
7117 cld_filter.element.flags |=
7118 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7119 else
7120 cld_filter.element.flags |=
7121 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
 7122	} else {
7123 dev_err(&pf->pdev->dev,
7124 "either mac or ip has to be valid for cloud filter\n");
7125 return -EINVAL;
7126 }
7127
7128 /* Now copy L4 port in Byte 6..7 in general fields */
7129 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7130 be16_to_cpu(filter->dst_port);
7131
7132 if (add) {
7133 /* Validate current device switch mode, change if necessary */
7134 ret = i40e_validate_and_set_switch_mode(vsi);
7135 if (ret) {
7136 dev_err(&pf->pdev->dev,
7137 "failed to set switch mode, ret %d\n",
7138 ret);
7139 return ret;
7140 }
7141
7142 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7143 &cld_filter, 1);
7144 } else {
7145 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7146 &cld_filter, 1);
7147 }
7148
7149 if (ret)
7150 dev_dbg(&pf->pdev->dev,
7151 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7152 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7153 else
7154 dev_info(&pf->pdev->dev,
7155 "%s cloud filter for VSI: %d, L4 port: %d\n",
7156 add ? "add" : "delete", filter->seid,
7157 ntohs(filter->dst_port));
7158 return ret;
 7159}
 7160
 7161/**
7162 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7163 * @vsi: Pointer to VSI
7164 * @cls_flower: Pointer to struct tc_cls_flower_offload
7165 * @filter: Pointer to cloud filter structure
7166 *
7167 **/
7168static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7169 struct tc_cls_flower_offload *f,
7170 struct i40e_cloud_filter *filter)
 7171{
7172 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7173 struct i40e_pf *pf = vsi->back;
7174 u8 field_flags = 0;
7175
7176 if (f->dissector->used_keys &
7177 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7178 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7179 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7180 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7181 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7182 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7183 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7184 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7185 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7186 f->dissector->used_keys);
7187 return -EOPNOTSUPP;
7188 }
 7189
7190 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7191 struct flow_dissector_key_keyid *key =
7192 skb_flow_dissector_target(f->dissector,
7193 FLOW_DISSECTOR_KEY_ENC_KEYID,
7194 f->key);
 7195
7196 struct flow_dissector_key_keyid *mask =
7197 skb_flow_dissector_target(f->dissector,
7198 FLOW_DISSECTOR_KEY_ENC_KEYID,
7199 f->mask);
7200
7201 if (mask->keyid != 0)
7202 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7203
7204 filter->tenant_id = be32_to_cpu(key->keyid);
7205 }
7206
7207 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7208 struct flow_dissector_key_basic *key =
7209 skb_flow_dissector_target(f->dissector,
7210 FLOW_DISSECTOR_KEY_BASIC,
7211 f->key);
 7212
7213 struct flow_dissector_key_basic *mask =
7214 skb_flow_dissector_target(f->dissector,
7215 FLOW_DISSECTOR_KEY_BASIC,
7216 f->mask);
7217
7218 n_proto_key = ntohs(key->n_proto);
7219 n_proto_mask = ntohs(mask->n_proto);
7220
7221 if (n_proto_key == ETH_P_ALL) {
7222 n_proto_key = 0;
7223 n_proto_mask = 0;
7224 }
7225 filter->n_proto = n_proto_key & n_proto_mask;
7226 filter->ip_proto = key->ip_proto;
7227 }
7228
7229 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7230 struct flow_dissector_key_eth_addrs *key =
7231 skb_flow_dissector_target(f->dissector,
7232 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7233 f->key);
7234
7235 struct flow_dissector_key_eth_addrs *mask =
7236 skb_flow_dissector_target(f->dissector,
7237 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7238 f->mask);
7239
7240 /* use is_broadcast and is_zero to check for all 0xf or 0 */
7241 if (!is_zero_ether_addr(mask->dst)) {
7242 if (is_broadcast_ether_addr(mask->dst)) {
7243 field_flags |= I40E_CLOUD_FIELD_OMAC;
7244 } else {
7245 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7246 mask->dst);
7247 return I40E_ERR_CONFIG;
7248 }
7249 }
7250
7251 if (!is_zero_ether_addr(mask->src)) {
7252 if (is_broadcast_ether_addr(mask->src)) {
7253 field_flags |= I40E_CLOUD_FIELD_IMAC;
7254 } else {
7255 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7256 mask->src);
7257 return I40E_ERR_CONFIG;
7258 }
7259 }
7260 ether_addr_copy(filter->dst_mac, key->dst);
7261 ether_addr_copy(filter->src_mac, key->src);
7262 }
7263
7264 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7265 struct flow_dissector_key_vlan *key =
7266 skb_flow_dissector_target(f->dissector,
7267 FLOW_DISSECTOR_KEY_VLAN,
7268 f->key);
7269 struct flow_dissector_key_vlan *mask =
7270 skb_flow_dissector_target(f->dissector,
7271 FLOW_DISSECTOR_KEY_VLAN,
7272 f->mask);
 7273
7274 if (mask->vlan_id) {
7275 if (mask->vlan_id == VLAN_VID_MASK) {
7276 field_flags |= I40E_CLOUD_FIELD_IVLAN;
 7277
7278 } else {
7279 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7280 mask->vlan_id);
7281 return I40E_ERR_CONFIG;
7282 }
7283 }
 7284
7285 filter->vlan_id = cpu_to_be16(key->vlan_id);
7286 }
7287
7288 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7289 struct flow_dissector_key_control *key =
7290 skb_flow_dissector_target(f->dissector,
7291 FLOW_DISSECTOR_KEY_CONTROL,
7292 f->key);
7293
7294 addr_type = key->addr_type;
7295 }
7296
7297 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7298 struct flow_dissector_key_ipv4_addrs *key =
7299 skb_flow_dissector_target(f->dissector,
7300 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7301 f->key);
7302 struct flow_dissector_key_ipv4_addrs *mask =
7303 skb_flow_dissector_target(f->dissector,
7304 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7305 f->mask);
7306
7307 if (mask->dst) {
7308 if (mask->dst == cpu_to_be32(0xffffffff)) {
7309 field_flags |= I40E_CLOUD_FIELD_IIP;
7310 } else {
 7311				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7312 &mask->dst);
7313 return I40E_ERR_CONFIG;
7314 }
7315 }
7316
7317 if (mask->src) {
7318 if (mask->src == cpu_to_be32(0xffffffff)) {
7319 field_flags |= I40E_CLOUD_FIELD_IIP;
7320 } else {
 7321				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7322 &mask->src);
7323 return I40E_ERR_CONFIG;
7324 }
7325 }
7326
7327 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7328 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7329 return I40E_ERR_CONFIG;
7330 }
7331 filter->dst_ipv4 = key->dst;
7332 filter->src_ipv4 = key->src;
7333 }
7334
7335 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7336 struct flow_dissector_key_ipv6_addrs *key =
7337 skb_flow_dissector_target(f->dissector,
7338 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7339 f->key);
7340 struct flow_dissector_key_ipv6_addrs *mask =
7341 skb_flow_dissector_target(f->dissector,
7342 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7343 f->mask);
7344
7345 /* src and dest IPV6 address should not be LOOPBACK
7346 * (0:0:0:0:0:0:0:1), which can be represented as ::1
 7347		 */
7348 if (ipv6_addr_loopback(&key->dst) ||
7349 ipv6_addr_loopback(&key->src)) {
7350 dev_err(&pf->pdev->dev,
7351 "Bad ipv6, addr is LOOPBACK\n");
7352 return I40E_ERR_CONFIG;
7353 }
7354 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7355 field_flags |= I40E_CLOUD_FIELD_IIP;
7356
7357 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7358 sizeof(filter->src_ipv6));
7359 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7360 sizeof(filter->dst_ipv6));
7361 }
7362
7363 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7364 struct flow_dissector_key_ports *key =
7365 skb_flow_dissector_target(f->dissector,
7366 FLOW_DISSECTOR_KEY_PORTS,
7367 f->key);
7368 struct flow_dissector_key_ports *mask =
7369 skb_flow_dissector_target(f->dissector,
7370 FLOW_DISSECTOR_KEY_PORTS,
7371 f->mask);
7372
7373 if (mask->src) {
7374 if (mask->src == cpu_to_be16(0xffff)) {
7375 field_flags |= I40E_CLOUD_FIELD_IIP;
7376 } else {
7377 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7378 be16_to_cpu(mask->src));
7379 return I40E_ERR_CONFIG;
7380 }
7381 }
7382
7383 if (mask->dst) {
7384 if (mask->dst == cpu_to_be16(0xffff)) {
7385 field_flags |= I40E_CLOUD_FIELD_IIP;
7386 } else {
7387 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7388 be16_to_cpu(mask->dst));
7389 return I40E_ERR_CONFIG;
7390 }
7391 }
7392
7393 filter->dst_port = key->dst;
7394 filter->src_port = key->src;
7395
7396 switch (filter->ip_proto) {
7397 case IPPROTO_TCP:
7398 case IPPROTO_UDP:
7399 break;
7400 default:
7401 dev_err(&pf->pdev->dev,
7402 "Only UDP and TCP transport are supported\n");
7403 return -EINVAL;
 7404		}
 7405	}
7406 filter->flags = field_flags;
7407 return 0;
7408}
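/* Userspace example (illustrative): a flower filter that this parser
 * accepts, matching an exact dst IP and TCP dst port and directing the
 * flow to a traffic class:
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 */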
 7409
7410/**
 7411 * i40e_handle_tclass - Forward to a traffic class on the device
7412 * @vsi: Pointer to VSI
7413 * @tc: traffic class index on the device
7414 * @filter: Pointer to cloud filter structure
7415 *
7416 **/
7417static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7418 struct i40e_cloud_filter *filter)
7419{
7420 struct i40e_channel *ch, *ch_tmp;
7421
7422 /* direct to a traffic class on the same device */
7423 if (tc == 0) {
7424 filter->seid = vsi->seid;
7425 return 0;
7426 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7427 if (!filter->dst_port) {
7428 dev_err(&vsi->back->pdev->dev,
7429 "Specify destination port to direct to traffic class that is not default\n");
7430 return -EINVAL;
7431 }
7432 if (list_empty(&vsi->ch_list))
7433 return -EINVAL;
7434 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7435 list) {
7436 if (ch->seid == vsi->tc_seid_map[tc])
7437 filter->seid = ch->seid;
7438 }
7439 return 0;
7440 }
7441 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7442 return -EINVAL;
 7443}
 7444
 7445/**
7446 * i40e_configure_clsflower - Configure tc flower filters
7447 * @vsi: Pointer to VSI
7448 * @cls_flower: Pointer to struct tc_cls_flower_offload
7449 *
 7450 **/
7451static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7452 struct tc_cls_flower_offload *cls_flower)
 7453{
7454 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7455 struct i40e_cloud_filter *filter = NULL;
 7456	struct i40e_pf *pf = vsi->back;
 7457	int err = 0;
 7458
7459 if (tc < 0) {
7460 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
 7461		return -EOPNOTSUPP;
 7462	}
 7463
7464 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7465 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7466 return -EBUSY;
 7467
7468 if (pf->fdir_pf_active_filters ||
7469 (!hlist_empty(&pf->fdir_filter_list))) {
7470 dev_err(&vsi->back->pdev->dev,
 7471			"Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
7472 return -EINVAL;
7473 }
 7474
7475 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7476 dev_err(&vsi->back->pdev->dev,
7477 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7478 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7479 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
 7480	}
 7481
7482 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7483 if (!filter)
7484 return -ENOMEM;
7485
7486 filter->cookie = cls_flower->cookie;
7487
7488 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7489 if (err < 0)
7490 goto err;
7491
7492 err = i40e_handle_tclass(vsi, tc, filter);
7493 if (err < 0)
7494 goto err;
7495
7496 /* Add cloud filter */
7497 if (filter->dst_port)
7498 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7499 else
7500 err = i40e_add_del_cloud_filter(vsi, filter, true);
7501
7502 if (err) {
7503 dev_err(&pf->pdev->dev,
7504 "Failed to add cloud filter, err %s\n",
7505 i40e_stat_str(&pf->hw, err));
 7506		goto err;
 7507	}
 7508
7509 /* add filter to the ordered list */
7510 INIT_HLIST_NODE(&filter->cloud_node);
 7511
7512 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7513
7514 pf->num_cloud_filters++;
7515
7516 return err;
7517err:
7518 kfree(filter);
7519 return err;
7520}
7521
7522/**
 7523 * i40e_find_cloud_filter - Find the cloud filter in the list
7524 * @vsi: Pointer to VSI
7525 * @cookie: filter specific cookie
 7526 *
41c445ff 7527 **/
7528static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7529 unsigned long *cookie)
 7530{
7531 struct i40e_cloud_filter *filter = NULL;
7532 struct hlist_node *node2;
 7533
7534 hlist_for_each_entry_safe(filter, node2,
7535 &vsi->back->cloud_filter_list, cloud_node)
7536 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7537 return filter;
7538 return NULL;
7539}
7540
7541/**
7542 * i40e_delete_clsflower - Remove tc flower filters
7543 * @vsi: Pointer to VSI
7544 * @cls_flower: Pointer to struct tc_cls_flower_offload
7545 *
 7546 **/
7547static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7548 struct tc_cls_flower_offload *cls_flower)
 7549{
7550 struct i40e_cloud_filter *filter = NULL;
7551 struct i40e_pf *pf = vsi->back;
7552 int err = 0;
 7553
 7554	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
 7555
7556 if (!filter)
7557 return -EINVAL;
 7558
 7559	hash_del(&filter->cloud_node);
 7560
7561 if (filter->dst_port)
7562 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7563 else
7564 err = i40e_add_del_cloud_filter(vsi, filter, false);
 7565
7566 kfree(filter);
7567 if (err) {
7568 dev_err(&pf->pdev->dev,
7569 "Failed to delete cloud filter, err %s\n",
7570 i40e_stat_str(&pf->hw, err));
7571 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
 7572	}
 7573
7574 pf->num_cloud_filters--;
7575 if (!pf->num_cloud_filters)
7576 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7577 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7578 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7579 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7580 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7581 }
7582 return 0;
7583}
7584
7585/**
 7586 * i40e_setup_tc_cls_flower - flower classifier offloads
 7587 * @np: net device private structure
 7588 * @cls_flower: offload data
 7589 **/
7590static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7591 struct tc_cls_flower_offload *cls_flower)
 7592{
41c445ff 7593 struct i40e_vsi *vsi = np->vsi;
 7594
7595 switch (cls_flower->command) {
7596 case TC_CLSFLOWER_REPLACE:
7597 return i40e_configure_clsflower(vsi, cls_flower);
7598 case TC_CLSFLOWER_DESTROY:
7599 return i40e_delete_clsflower(vsi, cls_flower);
7600 case TC_CLSFLOWER_STATS:
7601 return -EOPNOTSUPP;
7602 default:
 7603		return -EOPNOTSUPP;
 7604	}
 7605}
 7606
7607static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7608 void *cb_priv)
7609{
7610 struct i40e_netdev_priv *np = cb_priv;
 7611
7612 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7613 return -EOPNOTSUPP;
7614
7615 switch (type) {
7616 case TC_SETUP_CLSFLOWER:
7617 return i40e_setup_tc_cls_flower(np, type_data);
 7618
7619 default:
7620 return -EOPNOTSUPP;
 7621	}
 7622}
 7623
7624static int i40e_setup_tc_block(struct net_device *dev,
7625 struct tc_block_offload *f)
7626{
7627 struct i40e_netdev_priv *np = netdev_priv(dev);
 7628
7629 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7630 return -EOPNOTSUPP;
7631
7632 switch (f->command) {
7633 case TC_BLOCK_BIND:
7634 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
 7635					     np, np, f->extack);
7636 case TC_BLOCK_UNBIND:
7637 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7638 return 0;
7639 default:
7640 return -EOPNOTSUPP;
7641 }
7642}
7643
 7644static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 7645			   void *type_data)
 7646{
 7647	switch (type) {
 7648	case TC_SETUP_QDISC_MQPRIO:
7649 return i40e_setup_tc(netdev, type_data);
7650 case TC_SETUP_BLOCK:
7651 return i40e_setup_tc_block(netdev, type_data);
7652 default:
 7653		return -EOPNOTSUPP;
 7654	}
7655}
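/* Illustrative hookup (an assumption based on standard netdev practice,
 * not shown in this excerpt): the stack reaches __i40e_setup_tc through
 * the device ops, e.g.:
 *
 *	static const struct net_device_ops i40e_netdev_ops = {
 *		...
 *		.ndo_setup_tc	= __i40e_setup_tc,
 *	};
 */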
7656
7657/**
7658 * i40e_open - Called when a network interface is made active
7659 * @netdev: network interface device structure
7660 *
7661 * The open entry point is called when a network interface is made
7662 * active by the system (IFF_UP). At this point all resources needed
7663 * for transmit and receive operations are allocated, the interrupt
7664 * handler is registered with the OS, the netdev watchdog subtask is
7665 * enabled, and the stack is notified that the interface is ready.
7666 *
7667 * Returns 0 on success, negative value on failure
7668 **/
 7669int i40e_open(struct net_device *netdev)
7670{
7671 struct i40e_netdev_priv *np = netdev_priv(netdev);
7672 struct i40e_vsi *vsi = np->vsi;
7673 struct i40e_pf *pf = vsi->back;
7674 int err;
7675
 7676	/* disallow open during test or if eeprom is broken */
7677 if (test_bit(__I40E_TESTING, pf->state) ||
7678 test_bit(__I40E_BAD_EEPROM, pf->state))
7679 return -EBUSY;
7680
7681 netif_carrier_off(netdev);
7682
7683 if (i40e_force_link_state(pf, true))
7684 return -EAGAIN;
7685
7686 err = i40e_vsi_open(vsi);
7687 if (err)
7688 return err;
7689
7690 /* configure global TSO hardware offload settings */
7691 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7692 TCP_FLAG_FIN) >> 16);
7693 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7694 TCP_FLAG_FIN |
7695 TCP_FLAG_CWR) >> 16);
7696 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7697
 7698	udp_tunnel_get_rx_info(netdev);
 7699
7700 return 0;
7701}
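/* Worked example (illustrative): TCP_FLAG_* are big-endian constants
 * whose flag bits sit in bits 16..23 of the 32-bit TCP word, so
 * be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16 yields the plain
 * 8-bit flag mask the TSOMSK registers expect: 0x08 | 0x01 = 0x09.
 */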
7702
7703/**
 7704 * i40e_vsi_open - Open and set up a VSI
7705 * @vsi: the VSI to open
7706 *
7707 * Finish initialization of the VSI.
7708 *
7709 * Returns 0 on success, negative value on failure
7710 *
7711 * Note: expects to be called while under rtnl_lock()
7712 **/
7713int i40e_vsi_open(struct i40e_vsi *vsi)
7714{
7715 struct i40e_pf *pf = vsi->back;
 7716	char int_name[I40E_INT_NAME_STR_LEN];
7717 int err;
7718
7719 /* allocate descriptors */
7720 err = i40e_vsi_setup_tx_resources(vsi);
7721 if (err)
7722 goto err_setup_tx;
7723 err = i40e_vsi_setup_rx_resources(vsi);
7724 if (err)
7725 goto err_setup_rx;
7726
7727 err = i40e_vsi_configure(vsi);
7728 if (err)
7729 goto err_setup_rx;
7730
7731 if (vsi->netdev) {
7732 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7733 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7734 err = i40e_vsi_request_irq(vsi, int_name);
7735 if (err)
7736 goto err_setup_rx;
 7737
7738 /* Notify the stack of the actual queue counts. */
7739 err = netif_set_real_num_tx_queues(vsi->netdev,
7740 vsi->num_queue_pairs);
7741 if (err)
7742 goto err_set_queues;
 7743
7744 err = netif_set_real_num_rx_queues(vsi->netdev,
7745 vsi->num_queue_pairs);
7746 if (err)
7747 goto err_set_queues;
7748
7749 } else if (vsi->type == I40E_VSI_FDIR) {
 7750		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7751 dev_driver_string(&pf->pdev->dev),
7752 dev_name(&pf->pdev->dev));
 7753		err = i40e_vsi_request_irq(vsi, int_name);
 7754
 7755	} else {
 7756		err = -EINVAL;
7757 goto err_setup_rx;
7758 }
 7759
7760 err = i40e_up_complete(vsi);
7761 if (err)
7762 goto err_up_complete;
7763
7764 return 0;
7765
7766err_up_complete:
7767 i40e_down(vsi);
 7768err_set_queues:
7769 i40e_vsi_free_irq(vsi);
7770err_setup_rx:
7771 i40e_vsi_free_rx_resources(vsi);
7772err_setup_tx:
7773 i40e_vsi_free_tx_resources(vsi);
7774 if (vsi == pf->vsi[pf->lan_vsi])
 7775		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7776
7777 return err;
7778}
7779
7780/**
7781 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 7782 * @pf: Pointer to PF
7783 *
7784 * This function destroys the hlist where all the Flow Director
7785 * filters were saved.
7786 **/
7787static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7788{
7789 struct i40e_fdir_filter *filter;
 7790	struct i40e_flex_pit *pit_entry, *tmp;
7791 struct hlist_node *node2;
7792
7793 hlist_for_each_entry_safe(filter, node2,
7794 &pf->fdir_filter_list, fdir_node) {
7795 hlist_del(&filter->fdir_node);
7796 kfree(filter);
7797 }
 7798
7799 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7800 list_del(&pit_entry->list);
7801 kfree(pit_entry);
7802 }
7803 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7804
7805 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7806 list_del(&pit_entry->list);
7807 kfree(pit_entry);
7808 }
7809 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7810
 7811	pf->fdir_pf_active_filters = 0;
7812 pf->fd_tcp4_filter_cnt = 0;
7813 pf->fd_udp4_filter_cnt = 0;
 7814	pf->fd_sctp4_filter_cnt = 0;
 7815	pf->fd_ip4_filter_cnt = 0;
7816
7817 /* Reprogram the default input set for TCP/IPv4 */
7818 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7819 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7820 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7821
7822 /* Reprogram the default input set for UDP/IPv4 */
7823 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7824 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7825 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7826
7827 /* Reprogram the default input set for SCTP/IPv4 */
7828 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7829 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7830 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7831
7832 /* Reprogram the default input set for Other/IPv4 */
7833 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7834 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7835
7836 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7837 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7838}
7839
7840/**
7841 * i40e_cloud_filter_exit - Cleans up the cloud filters
7842 * @pf: Pointer to PF
7843 *
7844 * This function destroys the hlist where all the cloud filters
7845 * were saved.
7846 **/
7847static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7848{
7849 struct i40e_cloud_filter *cfilter;
7850 struct hlist_node *node;
7851
7852 hlist_for_each_entry_safe(cfilter, node,
7853 &pf->cloud_filter_list, cloud_node) {
7854 hlist_del(&cfilter->cloud_node);
7855 kfree(cfilter);
7856 }
7857 pf->num_cloud_filters = 0;
7858
7859 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7860 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7861 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7862 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7863 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7864 }
7865}
7866
7867/**
7868 * i40e_close - Disables a network interface
7869 * @netdev: network interface device structure
7870 *
7871 * The close entry point is called when an interface is de-activated
7872 * by the OS. The hardware is still under the driver's control, but
7873 * this netdev interface is disabled.
7874 *
7875 * Returns 0, this is not allowed to fail
7876 **/
 7877int i40e_close(struct net_device *netdev)
7878{
7879 struct i40e_netdev_priv *np = netdev_priv(netdev);
7880 struct i40e_vsi *vsi = np->vsi;
7881
 7882	i40e_vsi_close(vsi);
7883
7884 return 0;
7885}
7886
7887/**
7888 * i40e_do_reset - Start a PF or Core Reset sequence
7889 * @pf: board private structure
7890 * @reset_flags: which reset is requested
7891 * @lock_acquired: indicates whether or not the lock has been acquired
7892 * before this function was called.
7893 *
7894 * The essential difference in resets is that the PF Reset
7895 * doesn't clear the packet buffers, doesn't reset the PE
7896 * firmware, and doesn't bother the other PFs on the chip.
7897 **/
 7898void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7899{
7900 u32 val;
7901
7902 WARN_ON(in_interrupt());
7903
 7904
 7905	/* do the biggest reset indicated */
 7906	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7907
7908 /* Request a Global Reset
7909 *
7910 * This will start the chip's countdown to the actual full
7911 * chip reset event, and a warning interrupt to be sent
7912 * to all PFs, including the requestor. Our handler
7913 * for the warning interrupt will deal with the shutdown
7914 * and recovery of the switch setup.
7915 */
 7916		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7917 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7918 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7919 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7920
 7921	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7922
7923 /* Request a Core Reset
7924 *
7925 * Same as Global Reset, except does *not* include the MAC/PHY
7926 */
 7927		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7928 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7929 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7930 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7931 i40e_flush(&pf->hw);
7932
 7933	} else if (reset_flags & I40E_PF_RESET_FLAG) {
7934
7935 /* Request a PF Reset
7936 *
7937 * Resets only the PF-specific registers
7938 *
7939 * This goes directly to the tear-down and rebuild of
7940 * the switch, since we need to do all the recovery as
7941 * for the Core Reset.
7942 */
 7943		dev_dbg(&pf->pdev->dev, "PFR requested\n");
 7944		i40e_handle_reset_warning(pf, lock_acquired);
 7945
 7946	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7947 int v;
7948
7949 /* Find the VSI(s) that requested a re-init */
7950 dev_info(&pf->pdev->dev,
7951 "VSI reinit requested\n");
 7952		for (v = 0; v < pf->num_alloc_vsi; v++) {
 7953			struct i40e_vsi *vsi = pf->vsi[v];
 7954
 7955			if (vsi != NULL &&
 7956			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
 7957					       vsi->state))
 7958				i40e_vsi_reinit_locked(pf->vsi[v]);
 7959		}
 7960	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7961 int v;
7962
7963 /* Find the VSI(s) that needs to be brought down */
7964 dev_info(&pf->pdev->dev, "VSI down requested\n");
7965 for (v = 0; v < pf->num_alloc_vsi; v++) {
7966 struct i40e_vsi *vsi = pf->vsi[v];
 7967
 7968		if (vsi != NULL &&
 7969		    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7970 vsi->state)) {
7971 set_bit(__I40E_VSI_DOWN, vsi->state);
 7972				i40e_down(vsi);
7973 }
7974 }
7975 } else {
7976 dev_info(&pf->pdev->dev,
7977 "bad reset request 0x%08x\n", reset_flags);
7978 }
7979}
7980
7981#ifdef CONFIG_I40E_DCB
7982/**
7983 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
7984 * @pf: board private structure
7985 * @old_cfg: current DCB config
7986 * @new_cfg: new DCB config
7987 **/
7988bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
7989 struct i40e_dcbx_config *old_cfg,
7990 struct i40e_dcbx_config *new_cfg)
7991{
7992 bool need_reconfig = false;
7993
7994 /* Check if ETS configuration has changed */
7995 if (memcmp(&new_cfg->etscfg,
7996 &old_cfg->etscfg,
7997 sizeof(new_cfg->etscfg))) {
7998 /* If Priority Table has changed reconfig is needed */
7999 if (memcmp(&new_cfg->etscfg.prioritytable,
8000 &old_cfg->etscfg.prioritytable,
8001 sizeof(new_cfg->etscfg.prioritytable))) {
8002 need_reconfig = true;
 8003			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8004 }
8005
8006 if (memcmp(&new_cfg->etscfg.tcbwtable,
8007 &old_cfg->etscfg.tcbwtable,
8008 sizeof(new_cfg->etscfg.tcbwtable)))
 8009			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8010
8011 if (memcmp(&new_cfg->etscfg.tsatable,
8012 &old_cfg->etscfg.tsatable,
8013 sizeof(new_cfg->etscfg.tsatable)))
 8014			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8015 }
8016
8017 /* Check if PFC configuration has changed */
8018 if (memcmp(&new_cfg->pfc,
8019 &old_cfg->pfc,
8020 sizeof(new_cfg->pfc))) {
8021 need_reconfig = true;
 8022		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8023 }
8024
8025 /* Check if APP Table has changed */
8026 if (memcmp(&new_cfg->app,
8027 &old_cfg->app,
 8028		   sizeof(new_cfg->app))) {
 8029		need_reconfig = true;
 8030		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
 8031	}
 8032
 8033	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8034 return need_reconfig;
8035}
8036
8037/**
8038 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8039 * @pf: board private structure
8040 * @e: event info posted on ARQ
8041 **/
8042static int i40e_handle_lldp_event(struct i40e_pf *pf,
8043 struct i40e_arq_event_info *e)
8044{
8045 struct i40e_aqc_lldp_get_mib *mib =
8046 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8047 struct i40e_hw *hw = &pf->hw;
8048 struct i40e_dcbx_config tmp_dcbx_cfg;
8049 bool need_reconfig = false;
8050 int ret = 0;
8051 u8 type;
8052
 8053	/* Not DCB capable or capability disabled */
 8054	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8055 return ret;
8056
8057 /* Ignore if event is not for Nearest Bridge */
8058 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8059 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
 8060	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8061 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8062 return ret;
8063
8064 /* Check MIB Type and return if event for Remote MIB update */
8065 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
 8066	dev_dbg(&pf->pdev->dev,
 8067		"LLDP event mib type %s\n", type ? "remote" : "local");
8068 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8069 /* Update the remote cached instance and return */
8070 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8071 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8072 &hw->remote_dcbx_config);
8073 goto exit;
8074 }
8075
 8076	/* Store the old configuration */
 8077	tmp_dcbx_cfg = hw->local_dcbx_config;
 8078
8079 /* Reset the old DCBx configuration data */
8080 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8081 /* Get updated DCBX data from firmware */
8082 ret = i40e_get_dcb_config(&pf->hw);
 8083	if (ret) {
8084 dev_info(&pf->pdev->dev,
8085 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8086 i40e_stat_str(&pf->hw, ret),
8087 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8088 goto exit;
8089 }
8090
8091 /* No change detected in DCBX configs */
8092 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8093 sizeof(tmp_dcbx_cfg))) {
 8094		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8095 goto exit;
8096 }
8097
8098 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8099 &hw->local_dcbx_config);
 8100
 8101	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8102
8103 if (!need_reconfig)
8104 goto exit;
8105
4d9b6043 8106 /* Enable DCB tagging only when more than one TC */
750fcbcf 8107 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4d9b6043
NP
8108 pf->flags |= I40E_FLAG_DCB_ENABLED;
8109 else
8110 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8111
 8112	set_bit(__I40E_PORT_SUSPENDED, pf->state);
8113 /* Reconfiguration needed quiesce all VSIs */
8114 i40e_pf_quiesce_all_vsi(pf);
8115
8116 /* Changes in configuration update VEB/VSI */
8117 i40e_dcb_reconfigure(pf);
8118
8119 ret = i40e_resume_port_tx(pf);
8120
 8121	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
 8122	/* In case of error no point in resuming VSIs */
8123 if (ret)
8124 goto exit;
8125
8126 /* Wait for the PF's queues to be disabled */
8127 ret = i40e_pf_wait_queues_disabled(pf);
8128 if (ret) {
8129 /* Schedule PF reset to recover */
 8130		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8131 i40e_service_event_schedule(pf);
8132 } else {
 8133		i40e_pf_unquiesce_all_vsi(pf);
8134 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8135 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8136 }
8137
8138exit:
8139 return ret;
8140}
8141#endif /* CONFIG_I40E_DCB */
8142
23326186
ASJ
8143/**
8144 * i40e_do_reset_safe - Protected reset path for userland calls.
8145 * @pf: board private structure
8146 * @reset_flags: which reset is requested
8147 *
8148 **/
8149void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8150{
8151 rtnl_lock();
 8152	i40e_do_reset(pf, reset_flags, true);
8153 rtnl_unlock();
8154}
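/* Illustrative call site (an assumption; mirrors how userland-triggered
 * paths such as ethtool handlers request a PF reset):
 *
 *	i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
 */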
8155
8156/**
8157 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8158 * @pf: board private structure
8159 * @e: event info posted on ARQ
8160 *
8161 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8162 * and VF queues
8163 **/
8164static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8165 struct i40e_arq_event_info *e)
8166{
8167 struct i40e_aqc_lan_overflow *data =
8168 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8169 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8170 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8171 struct i40e_hw *hw = &pf->hw;
8172 struct i40e_vf *vf;
8173 u16 vf_id;
8174
8175 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8176 queue, qtx_ctl);
8177
8178 /* Queue belongs to VF, find the VF and issue VF reset */
8179 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8180 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8181 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8182 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8183 vf_id -= hw->func_caps.vf_base_id;
8184 vf = &pf->vf[vf_id];
8185 i40e_vc_notify_vf_reset(vf);
8186 /* Allow VF to process pending reset notification */
8187 msleep(20);
8188 i40e_reset_vf(vf, false);
8189 }
8190}
8191
 8192/**
8193 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8194 * @pf: board private structure
8195 **/
 8196u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 8197{
 8198	u32 val, fcnt_prog;
8199
8200 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8201 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8202 return fcnt_prog;
8203}
8204
8205/**
 8206 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8207 * @pf: board private structure
8208 **/
 8209u32 i40e_get_current_fd_count(struct i40e_pf *pf)
 8210{
8211 u32 val, fcnt_prog;
8212
8213 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8214 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8215 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8216 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8217 return fcnt_prog;
8218}
 8219
8220/**
8221 * i40e_get_global_fd_count - Get total FD filters programmed on device
8222 * @pf: board private structure
8223 **/
8224u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8225{
8226 u32 val, fcnt_prog;
8227
8228 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8229 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8230 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8231 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8232 return fcnt_prog;
8233}
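/* Worked example (illustrative, assuming the best-effort field starts
 * at bit 16 as the shift suggests): a raw count register value of
 * 0x00200040 decodes as 0x40 = 64 guaranteed filters plus 0x20 = 32
 * best-effort filters, so the helpers above return 96.
 */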
8234
8235/**
8236 * i40e_reenable_fdir_sb - Restore FDir SB capability
8237 * @pf: board private structure
8238 **/
8239static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8240{
 8241	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8242 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8243 (I40E_DEBUG_FD & pf->hw.debug_mask))
8244 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8245}
8246
8247/**
8248 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8249 * @pf: board private structure
8250 **/
8251static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8252{
 8253	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8254 /* ATR uses the same filtering logic as SB rules. It only
8255 * functions properly if the input set mask is at the default
8256 * settings. It is safe to restore the default input set
8257 * because there are no active TCPv4 filter rules.
8258 */
8259 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8260 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8261 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8262
01c96952
JK
8263 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8264 (I40E_DEBUG_FD & pf->hw.debug_mask))
8265 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8266 }
8267}
8268
6ac6d5a7
JK
8269/**
8270 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8271 * @pf: board private structure
8272 * @filter: FDir filter to remove
8273 */
8274static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8275 struct i40e_fdir_filter *filter)
8276{
8277 /* Update counters */
8278 pf->fdir_pf_active_filters--;
8279 pf->fd_inv = 0;
8280
8281 switch (filter->flow_type) {
8282 case TCP_V4_FLOW:
8283 pf->fd_tcp4_filter_cnt--;
8284 break;
8285 case UDP_V4_FLOW:
8286 pf->fd_udp4_filter_cnt--;
8287 break;
8288 case SCTP_V4_FLOW:
8289 pf->fd_sctp4_filter_cnt--;
8290 break;
8291 case IP_USER_FLOW:
8292 switch (filter->ip4_proto) {
8293 case IPPROTO_TCP:
8294 pf->fd_tcp4_filter_cnt--;
8295 break;
8296 case IPPROTO_UDP:
8297 pf->fd_udp4_filter_cnt--;
8298 break;
8299 case IPPROTO_SCTP:
8300 pf->fd_sctp4_filter_cnt--;
8301 break;
8302 case IPPROTO_IP:
8303 pf->fd_ip4_filter_cnt--;
8304 break;
8305 }
8306 break;
8307 }
8308
8309 /* Remove the filter from the list and free memory */
8310 hlist_del(&filter->fdir_node);
8311 kfree(filter);
8312}
8313
55a5e60b
ASJ
8314/**
8315 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
8316 * @pf: board private structure
8317 **/
8318void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8319{
3487b6c3 8320 struct i40e_fdir_filter *filter;
55a5e60b 8321 u32 fcnt_prog, fcnt_avail;
3487b6c3 8322 struct hlist_node *node;
55a5e60b 8323
0da36b97 8324 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
1e1be8f6
ASJ
8325 return;
8326
47994c11 8327 /* Check if we have enough room to re-enable FDir SB capability. */
04294e38 8328 fcnt_prog = i40e_get_global_fd_count(pf);
12957388 8329 fcnt_avail = pf->fdir_pf_filter_count;
1e1be8f6
ASJ
8330 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8331 (pf->fd_add_err == 0) ||
01c96952
JK
8332 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8333 i40e_reenable_fdir_sb(pf);
a3417d28 8334
47994c11
JK
8335 /* We should wait for even more space before re-enabling ATR.
8336 * Additionally, we cannot enable ATR as long as we still have TCP SB
8337 * rules active.
a3417d28 8338 */
47994c11 8339 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
01c96952
JK
8340 (pf->fd_tcp4_filter_cnt == 0))
8341 i40e_reenable_fdir_atr(pf);
3487b6c3
CW
8342
8343 /* if hw had a problem adding a filter, delete it */
8344 if (pf->fd_inv > 0) {
8345 hlist_for_each_entry_safe(filter, node,
8346 &pf->fdir_filter_list, fdir_node)
8347 if (filter->fd_id == pf->fd_inv)
8348 i40e_delete_invalid_filter(pf, filter);
3487b6c3 8349 }
55a5e60b
ASJ
8350}
8351
1e1be8f6 8352#define I40E_MIN_FD_FLUSH_INTERVAL 10
04294e38 8353#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
1e1be8f6
ASJ
8354/**
8355 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8356 * @pf: board private structure
8357 **/
8358static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8359{
 8360	unsigned long min_flush_time;
 8361	int flush_wait_retry = 50;
8362 bool disable_atr = false;
8363 int fd_room;
8364 int reg;
8365
8366 if (!time_after(jiffies, pf->fd_flush_timestamp +
8367 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8368 return;
 8369
 8370	/* If the flush is happening too quickly and we have mostly SB rules we
8371 * should not re-enable ATR for some time.
8372 */
8373 min_flush_time = pf->fd_flush_timestamp +
8374 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8375 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8376
8377 if (!(time_after(jiffies, min_flush_time)) &&
8378 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8379 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8380 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8381 disable_atr = true;
8382 }
8383
8384 pf->fd_flush_timestamp = jiffies;
 8385	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8386 /* flush all filters */
8387 wr32(&pf->hw, I40E_PFQF_CTL_1,
8388 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8389 i40e_flush(&pf->hw);
8390 pf->fd_flush_cnt++;
8391 pf->fd_add_err = 0;
8392 do {
8393 /* Check FD flush status every 5-6msec */
8394 usleep_range(5000, 6000);
8395 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8396 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8397 break;
8398 } while (flush_wait_retry--);
8399 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8400 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8401 } else {
8402 /* replay sideband filters */
8403 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
 8404		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
 8405			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 8406		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8407 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8408 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8409 }
8410}
8411
8412/**
 8413 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8414 * @pf: board private structure
8415 **/
 8416u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8417{
8418 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8419}
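/* Worked example (illustrative): with 100 filters programmed in total
 * and 40 active sideband (ntuple) rules, the ATR count returned above
 * is 100 - 40 = 60.
 */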
8420
 8421/* We can see up to 256 filter programming descriptors in transit if the
 8422 * filters are being applied really fast, before we see the first
 8423 * filter miss error on Rx queue 0. Accumulating enough error messages
 8424 * before reacting will make sure we don't cause a flush too often.
8425 */
8426#define I40E_MAX_FD_PROGRAM_ERROR 256
8427
8428/**
8429 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8430 * @pf: board private structure
8431 **/
8432static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8433{
 8434
 8435	/* if interface is down do nothing */
 8436	if (test_bit(__I40E_DOWN, pf->state))
 8437		return;
 8438
 8439	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8440 i40e_fdir_flush_and_replay(pf);
8441
8442 i40e_fdir_check_and_reenable(pf);
8443
8444}
8445
8446/**
8447 * i40e_vsi_link_event - notify VSI of a link event
8448 * @vsi: vsi to be notified
8449 * @link_up: link up or down
8450 **/
8451static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8452{
 8453	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8454 return;
8455
8456 switch (vsi->type) {
8457 case I40E_VSI_MAIN:
8458 if (!vsi->netdev || !vsi->netdev_registered)
8459 break;
8460
8461 if (link_up) {
8462 netif_carrier_on(vsi->netdev);
8463 netif_tx_wake_all_queues(vsi->netdev);
8464 } else {
8465 netif_carrier_off(vsi->netdev);
8466 netif_tx_stop_all_queues(vsi->netdev);
8467 }
8468 break;
8469
8470 case I40E_VSI_SRIOV:
41c445ff
JB
8471 case I40E_VSI_VMDQ2:
8472 case I40E_VSI_CTRL:
e3219ce6 8473 case I40E_VSI_IWARP:
41c445ff
JB
8474 case I40E_VSI_MIRROR:
8475 default:
8476 /* there is no notification for other VSIs */
8477 break;
8478 }
8479}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

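	/* Suppress redundant notifications: nothing changed and either the
	 * VSI is down or the netdev carrier already matches the new state.
	 */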
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

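	/* Collect all pending reset requests into reset_flags, clearing
	 * each state bit so a request that arrives later is seen as new.
	 */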
	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct.
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for an unqualified module; if the link is down,
		 * suppress the message if the link was forced down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
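		/* Bound the number of events handled per pass; if the limit
		 * is hit, __I40E_ADMINQ_EVENT_PENDING stays set so the
		 * service task is rescheduled to drain the rest.
		 */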
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB. We track the connections
 * through our own index numbers because the SEIDs from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried (function or device)
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

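		/* on ENOMEM the firmware reports the buffer size it actually
		 * needs in data_size, so the loop retries with that size
		 */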
		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		     + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}

static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}

/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
				       ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

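			/* the divide into 50 Mbps credit units below is only
			 * for the debug message; the limit itself was already
			 * requested via i40e_set_bw_limit() above
			 */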
			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
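	/* only one reset/rebuild sequence may run at a time; the recovery
	 * pending bit is cleared again once the rebuild finishes
	 */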
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
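	/* pack gen/snapshot into the high half of oem_ver and release into
	 * the low half; eetrack is replaced with the OEM marker value
	 */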
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
	     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
		/* The following delay is necessary for 4.33 firmware and older
		 * to recover after EMP reset. 200 ms should suffice but we
		 * put here 300 ms to be sure that FW is ready to operate
		 * after reset.
		 */
		mdelay(300);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* Enable FW to write a default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS		0x000E64DC
#define I40E_REG_MSS_MIN_MASK	0x3FF0000
#define I40E_64BYTE_MSS		0x400000
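	/* 0x400000 in the min-MSS field (mask 0x3FF0000, i.e. >> 16)
	 * corresponds to a 64 byte minimum MSS
	 */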
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
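		/* write all ones to clear the latched event */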
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

static const char *i40e_tunnel_name(u8 type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}

/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}

/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 filter_index, type;
	u16 port;
	int i;

	if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
		return;

	/* acquire RTNL to maintain state of flags and port requests */
	rtnl_lock();

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			struct i40e_udp_port_config *udp_port;
			i40e_status ret = 0;

			udp_port = &pf->udp_ports[i];
			pf->pending_udp_bitmap &= ~BIT_ULL(i);

			port = READ_ONCE(udp_port->port);
			type = READ_ONCE(udp_port->type);
			filter_index = READ_ONCE(udp_port->filter_index);

			/* release RTNL while we wait on AQ command */
			rtnl_unlock();

			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							     type,
							     &filter_index,
							     NULL);
			else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
				ret = i40e_aq_del_udp_tunnel(hw, filter_index,
							     NULL);

			/* reacquire RTNL so we can update filter_index */
			rtnl_lock();

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(type),
					 port ? "add" : "delete",
					 port,
					 filter_index,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				if (port) {
					/* failed to add, just reset port,
					 * drop pending bit for any deletion
					 */
					udp_port->port = 0;
					pf->pending_udp_bitmap &= ~BIT_ULL(i);
				}
			} else if (port) {
				/* record filter index on success */
				udp_port->filter_index = filter_index;
			}
		}
	}

	rtnl_unlock();
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

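	/* __I40E_SERVICE_SCHED doubles as a reentrancy guard; it is cleared
	 * below, after a memory barrier, once all subtasks have run
	 */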
	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
	} else {
		i40e_client_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
				       pf->state))
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in our PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;
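	/* single allocation laid out as [tx_rings | xdp_rings | rx_rings];
	 * freeing tx_rings in i40e_vsi_free_arrays() releases all three
	 */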

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
10041
41c445ff
JB
10042/**
10043 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10044 * @pf: board private structure
10045 * @type: type of VSI
10046 *
10047 * On error: returns error code (negative)
10048 * On success: returns vsi index in PF (positive)
10049 **/
10050static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10051{
10052 int ret = -ENODEV;
10053 struct i40e_vsi *vsi;
10054 int vsi_idx;
10055 int i;
10056
10057 /* Need to protect the allocation of the VSIs at the PF level */
10058 mutex_lock(&pf->switch_mutex);
10059
10060 /* VSI list may be fragmented if VSI creation/destruction has
10061 * been happening. We can afford to do a quick scan to look
10062 * for any free VSIs in the list.
10063 *
10064 * find next empty vsi slot, looping back around if necessary
10065 */
10066 i = pf->next_vsi;
505682cd 10067 while (i < pf->num_alloc_vsi && pf->vsi[i])
41c445ff 10068 i++;
505682cd 10069 if (i >= pf->num_alloc_vsi) {
41c445ff
JB
10070 i = 0;
10071 while (i < pf->next_vsi && pf->vsi[i])
10072 i++;
10073 }
10074
505682cd 10075 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
41c445ff
JB
10076 vsi_idx = i; /* Found one! */
10077 } else {
10078 ret = -ENODEV;
493fb300 10079 goto unlock_pf; /* out of VSI slots! */
41c445ff
JB
10080 }
10081 pf->next_vsi = ++i;
10082
10083 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10084 if (!vsi) {
10085 ret = -ENOMEM;
493fb300 10086 goto unlock_pf;
41c445ff
JB
10087 }
10088 vsi->type = type;
10089 vsi->back = pf;
0da36b97 10090 set_bit(__I40E_VSI_DOWN, vsi->state);
41c445ff
JB
10091 vsi->flags = 0;
10092 vsi->idx = vsi_idx;
ac26fc13 10093 vsi->int_rate_limit = 0;
5db4cb59
ASJ
10094 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10095 pf->rss_table_size : 64;
41c445ff
JB
10096 vsi->netdev_registered = false;
10097 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
278e7d0b 10098 hash_init(vsi->mac_filter_hash);
63741846 10099 vsi->irqs_ready = false;
41c445ff 10100
9f65e15b
AD
10101 ret = i40e_set_num_rings_in_vsi(vsi);
10102 if (ret)
10103 goto err_rings;
10104
bc7d338f 10105 ret = i40e_vsi_alloc_arrays(vsi, true);
f650a38b 10106 if (ret)
9f65e15b 10107 goto err_rings;
493fb300 10108
41c445ff
JB
10109 /* Setup default MSIX irq handler for VSI */
10110 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10111
21659035 10112 /* Initialize VSI lock */
278e7d0b 10113 spin_lock_init(&vsi->mac_filter_hash_lock);
41c445ff
JB
10114 pf->vsi[vsi_idx] = vsi;
10115 ret = vsi_idx;
493fb300
AD
10116 goto unlock_pf;
10117
9f65e15b 10118err_rings:
493fb300
AD
10119 pf->next_vsi = i - 1;
10120 kfree(vsi);
10121unlock_pf:
41c445ff
JB
10122 mutex_unlock(&pf->switch_mutex);
10123 return ret;
10124}
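/* A minimal sketch, assuming a slots[] array of length n, of the two-pass
 * search used above: scan from the cached next index to the end of the
 * table, then wrap and scan from 0 up to that index. Returns a free slot
 * index or -1 when the table is full.
 */
static int demo_find_free_slot(void **slots, int n, int next)
{
	int i = next;

	while (i < n && slots[i])
		i++;
	if (i >= n) {
		i = 0;			/* wrap around */
		while (i < next && slots[i])
			i++;
	}
	return (i < n && !slots[i]) ? i : -1;
}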
10125
f650a38b
ASJ
10126/**
10127 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
f5254429 10128 * @vsi: VSI pointer
bc7d338f 10129 * @free_qvectors: whether q_vectors need to be freed.
f650a38b
ASJ
10130 *
10131 * Returns nothing. The ring pointer containers are always freed;
10132 * the q_vector container only when @free_qvectors is true.
10133 **/
bc7d338f 10134static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
f650a38b
ASJ
10135{
10136 /* free the ring and vector containers */
bc7d338f
ASJ
10137 if (free_qvectors) {
10138 kfree(vsi->q_vectors);
10139 vsi->q_vectors = NULL;
10140 }
f650a38b
ASJ
10141 kfree(vsi->tx_rings);
10142 vsi->tx_rings = NULL;
10143 vsi->rx_rings = NULL;
74608d17 10144 vsi->xdp_rings = NULL;
f650a38b
ASJ
10145}
10146
28c5869f
HZ
10147/**
10148 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
10149 * and lookup table
10150 * @vsi: Pointer to VSI structure
10151 */
10152static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10153{
10154 if (!vsi)
10155 return;
10156
10157 kfree(vsi->rss_hkey_user);
10158 vsi->rss_hkey_user = NULL;
10159
10160 kfree(vsi->rss_lut_user);
10161 vsi->rss_lut_user = NULL;
10162}
10163
41c445ff
JB
10164/**
10165 * i40e_vsi_clear - Deallocate the VSI provided
10166 * @vsi: the VSI being un-configured
10167 **/
10168static int i40e_vsi_clear(struct i40e_vsi *vsi)
10169{
10170 struct i40e_pf *pf;
10171
10172 if (!vsi)
10173 return 0;
10174
10175 if (!vsi->back)
10176 goto free_vsi;
10177 pf = vsi->back;
10178
10179 mutex_lock(&pf->switch_mutex);
10180 if (!pf->vsi[vsi->idx]) {
7be78aa4
MW
10181 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10182 vsi->idx, vsi->idx, vsi->type);
41c445ff
JB
10183 goto unlock_vsi;
10184 }
10185
10186 if (pf->vsi[vsi->idx] != vsi) {
10187 dev_err(&pf->pdev->dev,
7be78aa4 10188 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
41c445ff 10189 pf->vsi[vsi->idx]->idx,
41c445ff 10190 pf->vsi[vsi->idx]->type,
7be78aa4 10191 vsi->idx, vsi->type);
41c445ff
JB
10192 goto unlock_vsi;
10193 }
10194
b40c82e6 10195 /* updates the PF for this cleared vsi */
41c445ff
JB
10196 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10197 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10198
bc7d338f 10199 i40e_vsi_free_arrays(vsi, true);
28c5869f 10200 i40e_clear_rss_config_user(vsi);
493fb300 10201
41c445ff
JB
10202 pf->vsi[vsi->idx] = NULL;
10203 if (vsi->idx < pf->next_vsi)
10204 pf->next_vsi = vsi->idx;
10205
10206unlock_vsi:
10207 mutex_unlock(&pf->switch_mutex);
10208free_vsi:
10209 kfree(vsi);
10210
10211 return 0;
10212}
10213
9f65e15b
AD
10214/**
10215 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10216 * @vsi: the VSI being cleaned
10217 **/
be1d5eea 10218static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
9f65e15b
AD
10219{
10220 int i;
10221
8e9dca53 10222 if (vsi->tx_rings && vsi->tx_rings[0]) {
d7397644 10223 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
00403f04
MW
10224 kfree_rcu(vsi->tx_rings[i], rcu);
10225 vsi->tx_rings[i] = NULL;
10226 vsi->rx_rings[i] = NULL;
74608d17
BT
10227 if (vsi->xdp_rings)
10228 vsi->xdp_rings[i] = NULL;
00403f04 10229 }
be1d5eea 10230 }
9f65e15b
AD
10231}
10232
41c445ff
JB
10233/**
10234 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10235 * @vsi: the VSI being configured
10236 **/
10237static int i40e_alloc_rings(struct i40e_vsi *vsi)
10238{
74608d17 10239 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
41c445ff 10240 struct i40e_pf *pf = vsi->back;
74608d17 10241 struct i40e_ring *ring;
41c445ff 10242
41c445ff 10243 /* Set basic values in the rings to be used later during open() */
d7397644 10244 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
ac6c5e3d 10245 /* allocate space for both Tx and Rx in one shot */
74608d17
BT
10246 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10247 if (!ring)
9f65e15b 10248 goto err_out;
41c445ff 10249
74608d17
BT
10250 ring->queue_index = i;
10251 ring->reg_idx = vsi->base_queue + i;
10252 ring->ring_active = false;
10253 ring->vsi = vsi;
10254 ring->netdev = vsi->netdev;
10255 ring->dev = &pf->pdev->dev;
10256 ring->count = vsi->num_desc;
10257 ring->size = 0;
10258 ring->dcb_tc = 0;
d36e41dc 10259 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
74608d17 10260 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
40588ca6 10261 ring->itr_setting = pf->tx_itr_default;
74608d17
BT
10262 vsi->tx_rings[i] = ring++;
10263
10264 if (!i40e_enabled_xdp_vsi(vsi))
10265 goto setup_rx;
10266
10267 ring->queue_index = vsi->alloc_queue_pairs + i;
10268 ring->reg_idx = vsi->base_queue + ring->queue_index;
10269 ring->ring_active = false;
10270 ring->vsi = vsi;
10271 ring->netdev = NULL;
10272 ring->dev = &pf->pdev->dev;
10273 ring->count = vsi->num_desc;
10274 ring->size = 0;
10275 ring->dcb_tc = 0;
d36e41dc 10276 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
74608d17
BT
10277 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10278 set_ring_xdp(ring);
40588ca6 10279 ring->itr_setting = pf->tx_itr_default;
74608d17
BT
10280 vsi->xdp_rings[i] = ring++;
10281
10282setup_rx:
10283 ring->queue_index = i;
10284 ring->reg_idx = vsi->base_queue + i;
10285 ring->ring_active = false;
10286 ring->vsi = vsi;
10287 ring->netdev = vsi->netdev;
10288 ring->dev = &pf->pdev->dev;
10289 ring->count = vsi->num_desc;
10290 ring->size = 0;
10291 ring->dcb_tc = 0;
40588ca6 10292 ring->itr_setting = pf->rx_itr_default;
74608d17 10293 vsi->rx_rings[i] = ring;
41c445ff
JB
10294 }
10295
10296 return 0;
9f65e15b
AD
10297
10298err_out:
10299 i40e_vsi_clear_rings(vsi);
10300 return -ENOMEM;
41c445ff
JB
10301}
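/* A sketch of the per-queue-pair allocation above: qpv ring structs
 * (Tx, optional XDP Tx, Rx) come from one calloc and are handed out by
 * advancing a single cursor, so a later free() on the Tx pointer releases
 * the whole group. demo_* names are hypothetical.
 */
#include <stdlib.h>

struct demo_ring2 { int queue_index; };	/* trimmed stand-in */

static int demo_alloc_pair(struct demo_ring2 **tx, struct demo_ring2 **xdp,
			   struct demo_ring2 **rx, int xdp_on)
{
	int qpv = xdp_on ? 3 : 2;
	struct demo_ring2 *ring = calloc(qpv, sizeof(*ring));

	if (!ring)
		return -1;
	*tx = ring++;			/* Tx ring first */
	*xdp = xdp_on ? ring++ : NULL;	/* XDP Tx ring, when enabled */
	*rx = ring;			/* Rx last; free(*tx) frees all */
	return 0;
}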
10302
10303/**
10304 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10305 * @pf: board private structure
10306 * @vectors: the number of MSI-X vectors to request
10307 *
10308 * Returns the number of vectors reserved, or error
10309 **/
10310static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10311{
7b37f376
AG
10312 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10313 I40E_MIN_MSIX, vectors);
10314 if (vectors < 0) {
41c445ff 10315 dev_info(&pf->pdev->dev,
7b37f376 10316 "MSI-X vector reservation failed: %d\n", vectors);
41c445ff
JB
10317 vectors = 0;
10318 }
10319
10320 return vectors;
10321}
10322
10323/**
10324 * i40e_init_msix - Setup the MSIX capability
10325 * @pf: board private structure
10326 *
10327 * Work with the OS to set up the MSIX vectors needed.
10328 *
3b444399 10329 * Returns the number of vectors reserved or negative on failure
41c445ff
JB
10330 **/
10331static int i40e_init_msix(struct i40e_pf *pf)
10332{
41c445ff 10333 struct i40e_hw *hw = &pf->hw;
c0cf70a6 10334 int cpus, extra_vectors;
1e200e4a 10335 int vectors_left;
41c445ff 10336 int v_budget, i;
3b444399 10337 int v_actual;
e3219ce6 10338 int iwarp_requested = 0;
41c445ff
JB
10339
10340 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10341 return -ENODEV;
10342
10343 /* The number of vectors we'll request will be comprised of:
10344 * - Add 1 for "other" cause for Admin Queue events, etc.
10345 * - The number of LAN queue pairs
f8ff1464
ASJ
10346 * - Queues being used for RSS.
10347 * We don't need as many as max_rss_size vectors;
10348 * use rss_size instead in the calculation, since that
10349 * is governed by the number of CPUs in the system.
10350 * - assumes symmetric Tx/Rx pairing
41c445ff 10351 * - The number of VMDq pairs
e3219ce6 10352 * - The CPU count within the NUMA node if iWARP is enabled
41c445ff
JB
10353 * Once we count this up, try the request.
10354 *
10355 * If we can't get what we want, we'll simplify to nearly nothing
10356 * and try again. If that still fails, we punt.
10357 */
1e200e4a
SN
10358 vectors_left = hw->func_caps.num_msix_vectors;
10359 v_budget = 0;
10360
10361 /* reserve one vector for miscellaneous handler */
10362 if (vectors_left) {
10363 v_budget++;
10364 vectors_left--;
10365 }
10366
c0cf70a6
JK
10367 /* reserve some vectors for the main PF traffic queues. Initially we
10368 * only reserve at most 50% of the available vectors, in the case that
10369 * the number of online CPUs is large. This ensures that we can enable
10370 * extra features as well. Once we've enabled the other features, we
10371 * will use any remaining vectors to reach as close as we can to the
10372 * number of online CPUs.
10373 */
10374 cpus = num_online_cpus();
10375 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
1e200e4a 10376 vectors_left -= pf->num_lan_msix;
1e200e4a
SN
10377
10378 /* reserve one vector for sideband flow director */
10379 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10380 if (vectors_left) {
a70e407f 10381 pf->num_fdsb_msix = 1;
1e200e4a
SN
10382 v_budget++;
10383 vectors_left--;
10384 } else {
a70e407f 10385 pf->num_fdsb_msix = 0;
1e200e4a
SN
10386 }
10387 }
83840e4b 10388
e3219ce6
ASJ
10389 /* can we reserve enough for iWARP? */
10390 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
4ce20abc
SA
10391 iwarp_requested = pf->num_iwarp_msix;
10392
e3219ce6
ASJ
10393 if (!vectors_left)
10394 pf->num_iwarp_msix = 0;
10395 else if (vectors_left < pf->num_iwarp_msix)
10396 pf->num_iwarp_msix = 1;
10397 v_budget += pf->num_iwarp_msix;
10398 vectors_left -= pf->num_iwarp_msix;
10399 }
10400
1e200e4a
SN
10401 /* any vectors left over go for VMDq support */
10402 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
9ca57e97
SA
10403 if (!vectors_left) {
10404 pf->num_vmdq_msix = 0;
10405 pf->num_vmdq_qps = 0;
10406 } else {
ca12c9d4
PM
10407 int vmdq_vecs_wanted =
10408 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10409 int vmdq_vecs =
10410 min_t(int, vectors_left, vmdq_vecs_wanted);
10411
9ca57e97
SA
10412 /* if we're short on vectors for what's desired, we limit
10413 * the queues per vmdq. If this is still more than are
10414 * available, the user will need to change the number of
10415 * queues/vectors used by the PF later with the ethtool
10416 * channels command
10417 */
ca12c9d4 10418 if (vectors_left < vmdq_vecs_wanted) {
9ca57e97 10419 pf->num_vmdq_qps = 1;
ca12c9d4
PM
10420 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10421 vmdq_vecs = min_t(int,
10422 vectors_left,
10423 vmdq_vecs_wanted);
10424 }
9ca57e97 10425 pf->num_vmdq_msix = pf->num_vmdq_qps;
1e200e4a 10426
9ca57e97
SA
10427 v_budget += vmdq_vecs;
10428 vectors_left -= vmdq_vecs;
10429 }
1e200e4a 10430 }
41c445ff 10431
c0cf70a6
JK
10432 /* On systems with a large number of SMP cores, we previously limited
10433 * the number of vectors for num_lan_msix to be at most 50% of the
10434 * available vectors, to allow for other features. Now, we add back
10435 * the remaining vectors. However, we ensure that the total
10436 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10437 * calculate the number of vectors we can add without going over the
10438 * cap of CPUs. For systems with a small number of CPUs this will be
10439 * zero.
10440 */
10441 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10442 pf->num_lan_msix += extra_vectors;
10443 vectors_left -= extra_vectors;
10444
10445 WARN(vectors_left < 0,
10446 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10447
10448 v_budget += pf->num_lan_msix;
41c445ff
JB
10449 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10450 GFP_KERNEL);
10451 if (!pf->msix_entries)
10452 return -ENOMEM;
10453
10454 for (i = 0; i < v_budget; i++)
10455 pf->msix_entries[i].entry = i;
3b444399 10456 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
a34977ba 10457
3b444399 10458 if (v_actual < I40E_MIN_MSIX) {
41c445ff
JB
10459 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10460 kfree(pf->msix_entries);
10461 pf->msix_entries = NULL;
4c95aa5d 10462 pci_disable_msix(pf->pdev);
41c445ff
JB
10463 return -ENODEV;
10464
3b444399 10465 } else if (v_actual == I40E_MIN_MSIX) {
41c445ff 10466 /* Adjust for minimal MSIX use */
41c445ff
JB
10467 pf->num_vmdq_vsis = 0;
10468 pf->num_vmdq_qps = 0;
41c445ff
JB
10469 pf->num_lan_qps = 1;
10470 pf->num_lan_msix = 1;
10471
3e6b1cf7 10472 } else if (v_actual != v_budget) {
4ce20abc
SA
10473 /* If we have limited resources, we will start with no vectors
10474 * for the special features and then allocate vectors to some
10475 * of these features based on the policy and at the end disable
10476 * the features that did not get any vectors.
10477 */
3b444399
SN
10478 int vec;
10479
4ce20abc 10480 dev_info(&pf->pdev->dev,
3e6b1cf7
SN
10481 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10482 v_actual, v_budget);
a34977ba 10483 /* reserve the misc vector */
3b444399 10484 vec = v_actual - 1;
a34977ba 10485
41c445ff
JB
10486 /* Scale vector usage down */
10487 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
a34977ba 10488 pf->num_vmdq_vsis = 1;
1e200e4a 10489 pf->num_vmdq_qps = 1;
41c445ff
JB
10490
10491 /* partition out the remaining vectors */
10492 switch (vec) {
10493 case 2:
41c445ff
JB
10494 pf->num_lan_msix = 1;
10495 break;
10496 case 3:
e3219ce6
ASJ
10497 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10498 pf->num_lan_msix = 1;
10499 pf->num_iwarp_msix = 1;
10500 } else {
10501 pf->num_lan_msix = 2;
10502 }
41c445ff
JB
10503 break;
10504 default:
e3219ce6
ASJ
10505 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10506 pf->num_iwarp_msix = min_t(int, (vec / 3),
10507 iwarp_requested);
10508 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10509 I40E_DEFAULT_NUM_VMDQ_VSI);
10510 } else {
10511 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10512 I40E_DEFAULT_NUM_VMDQ_VSI);
10513 }
abd97a94
SA
10514 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10515 pf->num_fdsb_msix = 1;
10516 vec--;
10517 }
e3219ce6
ASJ
10518 pf->num_lan_msix = min_t(int,
10519 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10520 pf->num_lan_msix);
4ce20abc 10521 pf->num_lan_qps = pf->num_lan_msix;
41c445ff
JB
10522 break;
10523 }
10524 }
10525
abd97a94
SA
10526 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10527 (pf->num_fdsb_msix == 0)) {
10528 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10529 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
2f4b411a 10530 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
abd97a94 10531 }
a34977ba
ASJ
10532 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10533 (pf->num_vmdq_msix == 0)) {
10534 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10535 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10536 }
e3219ce6
ASJ
10537
10538 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10539 (pf->num_iwarp_msix == 0)) {
10540 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10541 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10542 }
4ce20abc
SA
10543 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10544 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10545 pf->num_lan_msix,
10546 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10547 pf->num_fdsb_msix,
10548 pf->num_iwarp_msix);
10549
3b444399 10550 return v_actual;
41c445ff
JB
10551}
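/* Worked example of the budgeting above, assuming a hypothetical box with
 * 16 online CPUs, 64 MSI-X vectors in func_caps, FD sideband enabled, and
 * iWARP/VMDq disabled:
 *
 *   misc vector:  1                      (63 left)
 *   LAN queues:   min(16, 63 / 2) = 16   (47 left)
 *   FD sideband:  1                      (46 left)
 *   LAN top-up:   min(16 - 16, 46) = 0
 *
 * v_budget = 18, so the reservation becomes
 * pci_enable_msix_range(pf->pdev, pf->msix_entries, I40E_MIN_MSIX, 18).
 */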
10552
493fb300 10553/**
90e04070 10554 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
493fb300
AD
10555 * @vsi: the VSI being configured
10556 * @v_idx: index of the vector in the vsi struct
7f6c5539 10557 * @cpu: cpu to be used on affinity_mask
493fb300
AD
10558 *
10559 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10560 **/
7f6c5539 10561static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
493fb300
AD
10562{
10563 struct i40e_q_vector *q_vector;
10564
10565 /* allocate q_vector */
10566 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10567 if (!q_vector)
10568 return -ENOMEM;
10569
10570 q_vector->vsi = vsi;
10571 q_vector->v_idx = v_idx;
759dc4a7 10572 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
7f6c5539 10573
493fb300
AD
10574 if (vsi->netdev)
10575 netif_napi_add(vsi->netdev, &q_vector->napi,
eefeacee 10576 i40e_napi_poll, NAPI_POLL_WEIGHT);
493fb300
AD
10577
10578 /* tie q_vector and vsi together */
10579 vsi->q_vectors[v_idx] = q_vector;
10580
10581 return 0;
10582}
10583
41c445ff 10584/**
90e04070 10585 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
41c445ff
JB
10586 * @vsi: the VSI being configured
10587 *
10588 * We allocate one q_vector per queue interrupt. If allocation fails we
10589 * return -ENOMEM.
10590 **/
90e04070 10591static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
41c445ff
JB
10592{
10593 struct i40e_pf *pf = vsi->back;
7f6c5539 10594 int err, v_idx, num_q_vectors, current_cpu;
41c445ff
JB
10595
10596 /* if not MSIX, give the one vector only to the LAN VSI */
10597 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10598 num_q_vectors = vsi->num_q_vectors;
10599 else if (vsi == pf->vsi[pf->lan_vsi])
10600 num_q_vectors = 1;
10601 else
10602 return -EINVAL;
10603
7f6c5539
GP
10604 current_cpu = cpumask_first(cpu_online_mask);
10605
41c445ff 10606 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7f6c5539 10607 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
493fb300
AD
10608 if (err)
10609 goto err_out;
7f6c5539
GP
10610 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10611 if (unlikely(current_cpu >= nr_cpu_ids))
10612 current_cpu = cpumask_first(cpu_online_mask);
41c445ff
JB
10613 }
10614
10615 return 0;
493fb300
AD
10616
10617err_out:
10618 while (v_idx--)
10619 i40e_free_q_vector(vsi, v_idx);
10620
10621 return err;
41c445ff
JB
10622}
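/* A sketch, with plain modular arithmetic standing in for the cpumask
 * walk above: vectors take online CPUs round-robin and wrap back to the
 * first CPU when the iterator runs off the end.
 */
static void demo_spread_vectors(int *vec_cpu, int num_vectors, int ncpus)
{
	int cpu = 0, v;

	for (v = 0; v < num_vectors; v++) {
		vec_cpu[v] = cpu;
		if (++cpu >= ncpus)	/* like cpumask_next() overflowing */
			cpu = 0;	/* back to cpumask_first() */
	}
}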
10623
10624/**
10625 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10626 * @pf: board private structure to initialize
10627 **/
c1147280 10628static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
41c445ff 10629{
3b444399
SN
10630 int vectors = 0;
10631 ssize_t size;
41c445ff
JB
10632
10633 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3b444399
SN
10634 vectors = i40e_init_msix(pf);
10635 if (vectors < 0) {
60ea5f83 10636 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
e3219ce6 10637 I40E_FLAG_IWARP_ENABLED |
60ea5f83 10638 I40E_FLAG_RSS_ENABLED |
4d9b6043 10639 I40E_FLAG_DCB_CAPABLE |
a036244c 10640 I40E_FLAG_DCB_ENABLED |
60ea5f83
JB
10641 I40E_FLAG_SRIOV_ENABLED |
10642 I40E_FLAG_FD_SB_ENABLED |
10643 I40E_FLAG_FD_ATR_ENABLED |
10644 I40E_FLAG_VMDQ_ENABLED);
2f4b411a 10645 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
41c445ff
JB
10646
10647 /* rework the queue expectations without MSIX */
10648 i40e_determine_queue_usage(pf);
10649 }
10650 }
10651
10652 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10653 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
77fa28be 10654 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
3b444399
SN
10655 vectors = pci_enable_msi(pf->pdev);
10656 if (vectors < 0) {
10657 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10658 vectors);
41c445ff
JB
10659 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10660 }
3b444399 10661 vectors = 1; /* one MSI or Legacy vector */
41c445ff
JB
10662 }
10663
958a3e3b 10664 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
77fa28be 10665 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
958a3e3b 10666
3b444399
SN
10667 /* set up vector assignment tracking */
10668 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10669 pf->irq_pile = kzalloc(size, GFP_KERNEL);
557450c3 10670 if (!pf->irq_pile)
c1147280 10671 return -ENOMEM;
557450c3 10672
3b444399
SN
10673 pf->irq_pile->num_entries = vectors;
10674 pf->irq_pile->search_hint = 0;
10675
c1147280 10676 /* track first vector for misc interrupts, ignore return */
3b444399 10677 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
c1147280
JB
10678
10679 return 0;
41c445ff
JB
10680}
10681
b980c063
JK
10682/**
10683 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10684 * @pf: private board data structure
10685 *
10686 * Restore the interrupt scheme that was cleared when we suspended the
10687 * device. This should be called during resume to re-allocate the q_vectors
10688 * and reacquire IRQs.
10689 */
10690static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10691{
10692 int err, i;
10693
10694 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10695 * scheme. We need to re-enable them here in order to attempt to
10696 * re-acquire the MSI or MSI-X vectors
10697 */
10698 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10699
10700 err = i40e_init_interrupt_scheme(pf);
10701 if (err)
10702 return err;
10703
10704 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10705 * rings together again.
10706 */
10707 for (i = 0; i < pf->num_alloc_vsi; i++) {
10708 if (pf->vsi[i]) {
10709 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10710 if (err)
10711 goto err_unwind;
10712 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10713 }
10714 }
10715
10716 err = i40e_setup_misc_vector(pf);
10717 if (err)
10718 goto err_unwind;
10719
ddbb8d5d
SS
10720 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10721 i40e_client_update_msix_info(pf);
10722
b980c063
JK
10723 return 0;
10724
10725err_unwind:
10726 while (i--) {
10727 if (pf->vsi[i])
10728 i40e_vsi_free_q_vectors(pf->vsi[i]);
10729 }
10730
10731 return err;
10732}
b980c063 10733
41c445ff
JB
10734/**
10735 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10736 * @pf: board private structure
10737 *
10738 * This sets up the handler for MSIX 0, which is used to manage the
10739 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10740 * when in MSI or Legacy interrupt mode.
10741 **/
10742static int i40e_setup_misc_vector(struct i40e_pf *pf)
10743{
10744 struct i40e_hw *hw = &pf->hw;
10745 int err = 0;
10746
c17401a1
JK
10747 /* Only request the IRQ once, the first time through. */
10748 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
41c445ff 10749 err = request_irq(pf->msix_entries[0].vector,
b294ac70 10750 i40e_intr, 0, pf->int_name, pf);
41c445ff 10751 if (err) {
c17401a1 10752 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
41c445ff 10753 dev_info(&pf->pdev->dev,
77fa28be 10754 "request_irq for %s failed: %d\n",
b294ac70 10755 pf->int_name, err);
41c445ff
JB
10756 return -EFAULT;
10757 }
10758 }
10759
ab437b5a 10760 i40e_enable_misc_int_causes(pf);
41c445ff
JB
10761
10762 /* associate no queues to the misc vector */
10763 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10764 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10765
10766 i40e_flush(hw);
10767
dbadbbe2 10768 i40e_irq_dynamic_enable_icr0(pf);
41c445ff
JB
10769
10770 return err;
10771}
10772
95a73780
ASJ
10773/**
10774 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10775 * @vsi: Pointer to vsi structure
10776 * @seed: Buffer to store the hash keys
10777 * @lut: Buffer to store the lookup table entries
10778 * @lut_size: Size of buffer to store the lookup table entries
10779 *
10780 * Return 0 on success, negative on failure
10781 */
10782static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10783 u8 *lut, u16 lut_size)
10784{
10785 struct i40e_pf *pf = vsi->back;
10786 struct i40e_hw *hw = &pf->hw;
10787 int ret = 0;
10788
10789 if (seed) {
10790 ret = i40e_aq_get_rss_key(hw, vsi->id,
10791 (struct i40e_aqc_get_set_rss_key_data *)seed);
10792 if (ret) {
10793 dev_info(&pf->pdev->dev,
10794 "Cannot get RSS key, err %s aq_err %s\n",
10795 i40e_stat_str(&pf->hw, ret),
10796 i40e_aq_str(&pf->hw,
10797 pf->hw.aq.asq_last_status));
10798 return ret;
10799 }
10800 }
10801
10802 if (lut) {
10803 bool pf_lut = (vsi->type == I40E_VSI_MAIN);
10804
10805 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10806 if (ret) {
10807 dev_info(&pf->pdev->dev,
10808 "Cannot get RSS lut, err %s aq_err %s\n",
10809 i40e_stat_str(&pf->hw, ret),
10810 i40e_aq_str(&pf->hw,
10811 pf->hw.aq.asq_last_status));
10812 return ret;
10813 }
10814 }
10815
10816 return ret;
10817}
10818
e25d00b8 10819/**
043dd650 10820 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
e69ff813 10821 * @vsi: Pointer to vsi structure
e25d00b8 10822 * @seed: RSS hash seed
e69ff813
HZ
10823 * @lut: Lookup table
10824 * @lut_size: Lookup table size
10825 *
10826 * Returns 0 on success, negative on failure
41c445ff 10827 **/
e69ff813
HZ
10828static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10829 const u8 *lut, u16 lut_size)
41c445ff 10830{
e69ff813 10831 struct i40e_pf *pf = vsi->back;
4617e8c0 10832 struct i40e_hw *hw = &pf->hw;
c4e1868c 10833 u16 vf_id = vsi->vf_id;
e69ff813 10834 u8 i;
41c445ff 10835
e25d00b8 10836 /* Fill out hash function seed */
e69ff813
HZ
10837 if (seed) {
10838 u32 *seed_dw = (u32 *)seed;
10839
c4e1868c
MW
10840 if (vsi->type == I40E_VSI_MAIN) {
10841 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
26f77e53 10842 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
c4e1868c
MW
10843 } else if (vsi->type == I40E_VSI_SRIOV) {
10844 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
26f77e53 10845 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
c4e1868c
MW
10846 } else {
10847 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10848 }
e69ff813
HZ
10849 }
10850
10851 if (lut) {
10852 u32 *lut_dw = (u32 *)lut;
10853
c4e1868c
MW
10854 if (vsi->type == I40E_VSI_MAIN) {
10855 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10856 return -EINVAL;
10857 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10858 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10859 } else if (vsi->type == I40E_VSI_SRIOV) {
10860 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10861 return -EINVAL;
10862 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
26f77e53 10863 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
c4e1868c
MW
10864 } else {
10865 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10866 }
e25d00b8
ASJ
10867 }
10868 i40e_flush(hw);
10869
10870 return 0;
10871}
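/* A sketch of the register packing above, assuming the 52-byte hash key
 * used by this hardware (13 32-bit HKEY registers): the byte-wise seed is
 * written one dword at a time. The demo fills an array in place of the
 * MMIO writes, and uses memcpy rather than the pointer cast to stay
 * alignment-safe.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_KEY_BYTES 52	/* assumed I40E_HKEY_ARRAY_SIZE */

static void demo_write_key(uint32_t *regs, const uint8_t *seed)
{
	uint32_t dw;
	int i;

	for (i = 0; i < DEMO_KEY_BYTES / 4; i++) {
		memcpy(&dw, seed + 4 * i, 4);	/* load one dword */
		regs[i] = dw;			/* stand-in for wr32() */
	}
}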
10872
043dd650
HZ
10873/**
10874 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10875 * @vsi: Pointer to VSI structure
10876 * @seed: Buffer to store the keys
10877 * @lut: Buffer to store the lookup table entries
10878 * @lut_size: Size of buffer to store the lookup table entries
10879 *
10880 * Returns 0 on success, negative on failure
10881 */
10882static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10883 u8 *lut, u16 lut_size)
10884{
10885 struct i40e_pf *pf = vsi->back;
10886 struct i40e_hw *hw = &pf->hw;
10887 u16 i;
10888
10889 if (seed) {
10890 u32 *seed_dw = (u32 *)seed;
10891
10892 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
272cdaf2 10893 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
043dd650
HZ
10894 }
10895 if (lut) {
10896 u32 *lut_dw = (u32 *)lut;
10897
10898 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10899 return -EINVAL;
10900 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10901 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10902 }
10903
10904 return 0;
10905}
10906
10907/**
10908 * i40e_config_rss - Configure RSS keys and lut
10909 * @vsi: Pointer to VSI structure
10910 * @seed: RSS hash seed
10911 * @lut: Lookup table
10912 * @lut_size: Lookup table size
10913 *
10914 * Returns 0 on success, negative on failure
10915 */
10916int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10917{
10918 struct i40e_pf *pf = vsi->back;
10919
d36e41dc 10920 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
043dd650
HZ
10921 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10922 else
10923 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10924}
10925
10926/**
10927 * i40e_get_rss - Get RSS keys and lut
10928 * @vsi: Pointer to VSI structure
10929 * @seed: Buffer to store the keys
10930 * @lut: Buffer to store the lookup table entries
f5254429 10931 * @lut_size: Size of buffer to store the lookup table entries
043dd650
HZ
10932 *
10933 * Returns 0 on success, negative on failure
10934 */
10935int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10936{
95a73780
ASJ
10937 struct i40e_pf *pf = vsi->back;
10938
d36e41dc 10939 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
95a73780
ASJ
10940 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10941 else
10942 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
043dd650
HZ
10943}
10944
e69ff813
HZ
10945/**
10946 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
10947 * @pf: Pointer to board private structure
10948 * @lut: Lookup table
10949 * @rss_table_size: Lookup table size
10950 * @rss_size: Number of queues to spread the hash across
10951 */
f1582351
AB
10952void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10953 u16 rss_table_size, u16 rss_size)
e69ff813
HZ
10954{
10955 u16 i;
10956
10957 for (i = 0; i < rss_table_size; i++)
10958 lut[i] = i % rss_size;
10959}
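/* Example of the default fill above: with rss_table_size = 8 (real tables
 * are 128 or 512 entries) and rss_size = 3 enabled queues, lut[] becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 } -- hash buckets spread round-robin across
 * the queues.
 */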
10960
e25d00b8 10961/**
043dd650 10962 * i40e_pf_config_rss - Prepare for RSS if used
e25d00b8
ASJ
10963 * @pf: board private structure
10964 **/
043dd650 10965static int i40e_pf_config_rss(struct i40e_pf *pf)
e25d00b8
ASJ
10966{
10967 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10968 u8 seed[I40E_HKEY_ARRAY_SIZE];
e69ff813 10969 u8 *lut;
e25d00b8
ASJ
10970 struct i40e_hw *hw = &pf->hw;
10971 u32 reg_val;
10972 u64 hena;
e69ff813 10973 int ret;
e25d00b8 10974
41c445ff 10975 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
272cdaf2
SN
10976 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10977 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
e25d00b8
ASJ
10978 hena |= i40e_pf_get_default_rss_hena(pf);
10979
272cdaf2
SN
10980 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10981 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
41c445ff 10982
e25d00b8 10983 /* Determine the RSS table size based on the hardware capabilities */
272cdaf2 10984 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
e25d00b8
ASJ
10985 reg_val = (pf->rss_table_size == 512) ?
10986 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10987 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
272cdaf2 10988 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
e157ea30 10989
28c5869f 10990 /* Determine the RSS size of the VSI */
f25571b5
HR
10991 if (!vsi->rss_size) {
10992 u16 qcount;
b356dac8
MW
10993 /* If the firmware does something weird during VSI init, we
10994 * could end up with zero TCs. Check for that to avoid
10995 * divide-by-zero. It probably won't pass traffic, but it also
10996 * won't panic.
10997 */
10998 qcount = vsi->num_queue_pairs /
10999 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
f25571b5
HR
11000 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11001 }
a4fa59cc
MW
11002 if (!vsi->rss_size)
11003 return -EINVAL;
28c5869f 11004
e69ff813
HZ
11005 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11006 if (!lut)
11007 return -ENOMEM;
11008
28c5869f
HZ
11009 /* Use user configured lut if there is one, otherwise use default */
11010 if (vsi->rss_lut_user)
11011 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11012 else
11013 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
e69ff813 11014
28c5869f
HZ
11015 /* Use user configured hash key if there is one, otherwise
11016 * use default.
11017 */
11018 if (vsi->rss_hkey_user)
11019 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11020 else
11021 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
043dd650 11022 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
e69ff813
HZ
11023 kfree(lut);
11024
11025 return ret;
41c445ff
JB
11026}
11027
f8ff1464
ASJ
11028/**
11029 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11030 * @pf: board private structure
11031 * @queue_count: the requested queue count for rss.
11032 *
11033 * returns 0 if rss is not enabled, if enabled returns the final rss queue
11034 * count which may be different from the requested queue count.
373149fc 11035 * Note: expects to be called while under rtnl_lock()
f8ff1464
ASJ
11036 **/
11037int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11038{
9a3bd2f1
ASJ
11039 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11040 int new_rss_size;
11041
f8ff1464
ASJ
11042 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11043 return 0;
11044
3647cd6e 11045 queue_count = min_t(int, queue_count, num_online_cpus());
9a3bd2f1 11046 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
f8ff1464 11047
9a3bd2f1 11048 if (queue_count != vsi->num_queue_pairs) {
f25571b5
HR
11049 u16 qcount;
11050
9a3bd2f1 11051 vsi->req_queue_pairs = queue_count;
373149fc 11052 i40e_prep_for_reset(pf, true);
f8ff1464 11053
acd65448 11054 pf->alloc_rss_size = new_rss_size;
f8ff1464 11055
373149fc 11056 i40e_reset_and_rebuild(pf, true, true);
28c5869f
HZ
11057
11058 /* Discard the user configured hash keys and lut, if less
11059 * queues are enabled.
11060 */
11061 if (queue_count < vsi->rss_size) {
11062 i40e_clear_rss_config_user(vsi);
11063 dev_dbg(&pf->pdev->dev,
11064 "discard user configured hash keys and lut\n");
11065 }
11066
11067 /* Reset vsi->rss_size, as number of enabled queues changed */
f25571b5
HR
11068 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11069 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
28c5869f 11070
043dd650 11071 i40e_pf_config_rss(pf);
f8ff1464 11072 }
12815057
LY
11073 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11074 vsi->req_queue_pairs, pf->rss_size_max);
acd65448 11075 return pf->alloc_rss_size;
f8ff1464
ASJ
11076}
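/* Usage note (assumed tooling): this path is typically driven from user
 * space via the ethtool channels interface, e.g.
 *
 *   ethtool -L <ifname> combined 8
 *
 * and, as above, the count actually applied may be clamped by
 * num_online_cpus() and pf->rss_size_max.
 */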
11077
f4492db1 11078/**
4fc8c676 11079 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
f4492db1
GR
11080 * @pf: board private structure
11081 **/
4fc8c676 11082i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
f4492db1
GR
11083{
11084 i40e_status status;
11085 bool min_valid, max_valid;
11086 u32 max_bw, min_bw;
11087
11088 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11089 &min_valid, &max_valid);
11090
11091 if (!status) {
11092 if (min_valid)
4fc8c676 11093 pf->min_bw = min_bw;
f4492db1 11094 if (max_valid)
4fc8c676 11095 pf->max_bw = max_bw;
f4492db1
GR
11096 }
11097
11098 return status;
11099}
11100
11101/**
4fc8c676 11102 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
f4492db1
GR
11103 * @pf: board private structure
11104 **/
4fc8c676 11105i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
f4492db1
GR
11106{
11107 struct i40e_aqc_configure_partition_bw_data bw_data;
11108 i40e_status status;
11109
b40c82e6 11110 /* Set the valid bit for this PF */
41a1d04b 11111 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
4fc8c676
SN
11112 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11113 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
f4492db1
GR
11114
11115 /* Set the new bandwidths */
11116 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11117
11118 return status;
11119}
11120
11121/**
4fc8c676 11122 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
f4492db1
GR
11123 * @pf: board private structure
11124 **/
4fc8c676 11125i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
f4492db1
GR
11126{
11127 /* Commit temporary BW setting to permanent NVM image */
11128 enum i40e_admin_queue_err last_aq_status;
11129 i40e_status ret;
11130 u16 nvm_word;
11131
11132 if (pf->hw.partition_id != 1) {
11133 dev_info(&pf->pdev->dev,
11134 "Commit BW only works on partition 1! This is partition %d",
11135 pf->hw.partition_id);
11136 ret = I40E_NOT_SUPPORTED;
11137 goto bw_commit_out;
11138 }
11139
11140 /* Acquire NVM for read access */
11141 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11142 last_aq_status = pf->hw.aq.asq_last_status;
11143 if (ret) {
11144 dev_info(&pf->pdev->dev,
f1c7e72e
SN
11145 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11146 i40e_stat_str(&pf->hw, ret),
11147 i40e_aq_str(&pf->hw, last_aq_status));
f4492db1
GR
11148 goto bw_commit_out;
11149 }
11150
11151 /* Read word 0x10 of NVM - SW compatibility word 1 */
11152 ret = i40e_aq_read_nvm(&pf->hw,
11153 I40E_SR_NVM_CONTROL_WORD,
11154 0x10, sizeof(nvm_word), &nvm_word,
11155 false, NULL);
11156 /* Save off last admin queue command status before releasing
11157 * the NVM
11158 */
11159 last_aq_status = pf->hw.aq.asq_last_status;
11160 i40e_release_nvm(&pf->hw);
11161 if (ret) {
f1c7e72e
SN
11162 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11163 i40e_stat_str(&pf->hw, ret),
11164 i40e_aq_str(&pf->hw, last_aq_status));
f4492db1
GR
11165 goto bw_commit_out;
11166 }
11167
11168 /* Wait a bit for NVM release to complete */
11169 msleep(50);
11170
11171 /* Acquire NVM for write access */
11172 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11173 last_aq_status = pf->hw.aq.asq_last_status;
11174 if (ret) {
11175 dev_info(&pf->pdev->dev,
f1c7e72e
SN
11176 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11177 i40e_stat_str(&pf->hw, ret),
11178 i40e_aq_str(&pf->hw, last_aq_status));
f4492db1
GR
11179 goto bw_commit_out;
11180 }
11181 /* Write it back out unchanged to initiate update NVM,
11182 * which will force a write of the shadow (alt) RAM to
11183 * the NVM - thus storing the bandwidth values permanently.
11184 */
11185 ret = i40e_aq_update_nvm(&pf->hw,
11186 I40E_SR_NVM_CONTROL_WORD,
11187 0x10, sizeof(nvm_word),
e3a5d6e6 11188 &nvm_word, true, 0, NULL);
f4492db1
GR
11189 /* Save off last admin queue command status before releasing
11190 * the NVM
11191 */
11192 last_aq_status = pf->hw.aq.asq_last_status;
11193 i40e_release_nvm(&pf->hw);
11194 if (ret)
11195 dev_info(&pf->pdev->dev,
f1c7e72e
SN
11196 "BW settings NOT SAVED, err %s aq_err %s\n",
11197 i40e_stat_str(&pf->hw, ret),
11198 i40e_aq_str(&pf->hw, last_aq_status));
f4492db1
GR
11199bw_commit_out:
11200
11201 return ret;
11202}
11203
41c445ff
JB
11204/**
11205 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11206 * @pf: board private structure to initialize
11207 *
11208 * i40e_sw_init initializes the Adapter private data structure.
11209 * Fields are initialized based on PCI device information and
11210 * OS network device settings (MTU size).
11211 **/
11212static int i40e_sw_init(struct i40e_pf *pf)
11213{
11214 int err = 0;
11215 int size;
11216
41c445ff
JB
11217 /* Set default capability flags */
11218 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11219 I40E_FLAG_MSI_ENABLED |
2bc7ee8a
MW
11220 I40E_FLAG_MSIX_ENABLED;
11221
ca99eb99 11222 /* Set default ITR */
42702559
JK
11223 pf->rx_itr_default = I40E_ITR_RX_DEF;
11224 pf->tx_itr_default = I40E_ITR_TX_DEF;
ca99eb99 11225
7134f9ce
JB
11226 /* Depending on PF configurations, it is possible that the RSS
11227 * maximum might end up larger than the available queues
11228 */
41a1d04b 11229 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
acd65448 11230 pf->alloc_rss_size = 1;
5db4cb59 11231 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
7134f9ce
JB
11232 pf->rss_size_max = min_t(int, pf->rss_size_max,
11233 pf->hw.func_caps.num_tx_qp);
41c445ff
JB
11234 if (pf->hw.func_caps.rss) {
11235 pf->flags |= I40E_FLAG_RSS_ENABLED;
acd65448
HZ
11236 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11237 num_online_cpus());
41c445ff
JB
11238 }
11239
2050bc65 11240 /* MFP mode enabled */
c78b953e 11241 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
2050bc65
CS
11242 pf->flags |= I40E_FLAG_MFP_ENABLED;
11243 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
4fc8c676 11244 if (i40e_get_partition_bw_setting(pf)) {
f4492db1 11245 dev_warn(&pf->pdev->dev,
4fc8c676
SN
11246 "Could not get partition bw settings\n");
11247 } else {
f4492db1 11248 dev_info(&pf->pdev->dev,
4fc8c676
SN
11249 "Partition BW Min = %8.8x, Max = %8.8x\n",
11250 pf->min_bw, pf->max_bw);
11251
11252 /* nudge the Tx scheduler */
11253 i40e_set_partition_bw_setting(pf);
11254 }
2050bc65
CS
11255 }
11256
cbf61325
ASJ
11257 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11258 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11259 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11260 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
6eae9c6a
SN
11261 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11262 pf->hw.num_partitions > 1)
cbf61325 11263 dev_info(&pf->pdev->dev,
0b67584f 11264 "Flow Director Sideband mode Disabled in MFP mode\n");
6eae9c6a
SN
11265 else
11266 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
cbf61325
ASJ
11267 pf->fdir_pf_filter_count =
11268 pf->hw.func_caps.fd_filters_guaranteed;
11269 pf->hw.fdir_shared_filter_count =
11270 pf->hw.func_caps.fd_filters_best_effort;
41c445ff
JB
11271 }
11272
5a433199 11273 if (pf->hw.mac.type == I40E_MAC_X722) {
d36e41dc
JK
11274 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11275 I40E_HW_128_QP_RSS_CAPABLE |
11276 I40E_HW_ATR_EVICT_CAPABLE |
11277 I40E_HW_WB_ON_ITR_CAPABLE |
11278 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11279 I40E_HW_NO_PCI_LINK_CHECK |
11280 I40E_HW_USE_SET_LLDP_MIB |
11281 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11282 I40E_HW_PTP_L4_CAPABLE |
11283 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11284 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
10a955ff
ASJ
11285
11286#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11287 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11288 I40E_FDEVICT_PCTYPE_DEFAULT) {
11289 dev_warn(&pf->pdev->dev,
11290 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11291 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11292 }
5a433199
ASJ
11293 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11294 ((pf->hw.aq.api_maj_ver == 1) &&
11295 (pf->hw.aq.api_min_ver > 4))) {
11296 /* Supported in FW API version higher than 1.4 */
d36e41dc 11297 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
5a433199
ASJ
11298 }
11299
11300 /* Enable HW ATR eviction if possible */
d36e41dc 11301 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
5a433199
ASJ
11302 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11303
6de432c5 11304 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
8eed76fa 11305 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
f1bbad33 11306 (pf->hw.aq.fw_maj_ver < 4))) {
d36e41dc 11307 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
f1bbad33 11308 /* No DCB support for FW < v4.33 */
d36e41dc 11309 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
f1bbad33
NP
11310 }
11311
11312 /* Disable FW LLDP if FW < v4.3 */
6de432c5 11313 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
f1bbad33
NP
11314 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11315 (pf->hw.aq.fw_maj_ver < 4)))
d36e41dc 11316 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
f1bbad33
NP
11317
11318 /* Use the FW Set LLDP MIB API if FW > v4.40 */
6de432c5 11319 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
f1bbad33
NP
11320 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11321 (pf->hw.aq.fw_maj_ver >= 5)))
d36e41dc 11322 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
8eed76fa 11323
c3d26b75
AB
11324 /* Enable PTP L4 if FW > v6.0 */
11325 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11326 pf->hw.aq.fw_maj_ver >= 6)
11327 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11328
69399873 11329 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
41c445ff 11330 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
e25d00b8 11331 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
e9e53662 11332 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
41c445ff
JB
11333 }
11334
69399873 11335 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
e3219ce6
ASJ
11336 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11337 /* iWARP needs one extra vector for CQP, just like MISC. */
11338 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11339 }
5734fe87
PM
11340 /* Stopping FW LLDP engine is supported on XL710 and X722
11341 * starting from FW versions determined in i40e_init_adminq.
11342 * Stopping the FW LLDP engine is not supported on XL710
11343 * if NPAR is functioning, so unset this HW flag in that case.
7b63435a
DE
11344 */
11345 if (pf->hw.mac.type == I40E_MAC_XL710 &&
5734fe87
PM
11346 pf->hw.func_caps.npar_enable &&
11347 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11348 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
e3219ce6 11349
41c445ff 11350#ifdef CONFIG_PCI_IOV
ba252f13 11351 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
41c445ff
JB
11352 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11353 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11354 pf->num_req_vfs = min_t(int,
11355 pf->hw.func_caps.num_vfs,
11356 I40E_MAX_VF_COUNT);
11357 }
11358#endif /* CONFIG_PCI_IOV */
11359 pf->eeprom_version = 0xDEAD;
11360 pf->lan_veb = I40E_NO_VEB;
11361 pf->lan_vsi = I40E_NO_VSI;
11362
d1a8d275
ASJ
11363 /* By default FW has this off for performance reasons */
11364 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11365
41c445ff
JB
11366 /* set up queue assignment tracking */
11367 size = sizeof(struct i40e_lump_tracking)
11368 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11369 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11370 if (!pf->qp_pile) {
11371 err = -ENOMEM;
11372 goto sw_init_done;
11373 }
11374 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11375 pf->qp_pile->search_hint = 0;
11376
327fe04b
ASJ
11377 pf->tx_timeout_recovery_level = 1;
11378
41c445ff
JB
11379 mutex_init(&pf->switch_mutex);
11380
11381sw_init_done:
11382 return err;
11383}
11384
7c3c288b
ASJ
11385/**
11386 * i40e_set_ntuple - set the ntuple feature flag and take action
11387 * @pf: board private structure
11388 * @features: the feature set that the stack is suggesting
11389 *
11390 * returns a bool to indicate if reset needs to happen
11391 **/
11392bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11393{
11394 bool need_reset = false;
11395
11396 /* Check if Flow Director n-tuple support was enabled or disabled. If
11397 * the state changed, we need to reset.
11398 */
11399 if (features & NETIF_F_NTUPLE) {
11400 /* Enable filters and mark for reset */
11401 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11402 need_reset = true;
2f4b411a
AN
11403 /* enable FD_SB only if there is an MSI-X vector available and
11404 * no cloud filters exist
11405 */
11406 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
a70e407f 11407 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
2f4b411a
AN
11408 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11409 }
7c3c288b
ASJ
11410 } else {
11411 /* turn off filters, mark for reset and clear SW filter list */
11412 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11413 need_reset = true;
11414 i40e_fdir_filter_exit(pf);
11415 }
134201ae
JK
11416 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11417 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
2f4b411a
AN
11418 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11419
1e1be8f6 11420 /* reset fd counters */
097dbf52
JK
11421 pf->fd_add_err = 0;
11422 pf->fd_atr_cnt = 0;
8a4f34fb 11423 /* if ATR was auto disabled it can be re-enabled. */
134201ae 11424 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
47994c11
JK
11425 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11426 (I40E_DEBUG_FD & pf->hw.debug_mask))
234dc4e6 11427 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7c3c288b
ASJ
11428 }
11429 return need_reset;
11430}
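/* Usage note (assumed tooling): the NETIF_F_NTUPLE toggle evaluated above
 * normally comes from user space, e.g.
 *
 *   ethtool -K <ifname> ntuple on   (or off)
 *
 * and a state change reports need_reset so the caller issues a PF reset
 * before sideband filter changes take effect.
 */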
11431
d8ec9864
AB
11432/**
11433 * i40e_clear_rss_lut - clear the rx hash lookup table
11434 * @vsi: the VSI being configured
11435 **/
11436static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11437{
11438 struct i40e_pf *pf = vsi->back;
11439 struct i40e_hw *hw = &pf->hw;
11440 u16 vf_id = vsi->vf_id;
11441 u8 i;
11442
11443 if (vsi->type == I40E_VSI_MAIN) {
11444 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11445 wr32(hw, I40E_PFQF_HLUT(i), 0);
11446 } else if (vsi->type == I40E_VSI_SRIOV) {
11447 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11448 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11449 } else {
11450 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11451 }
11452}
11453
41c445ff
JB
11454/**
11455 * i40e_set_features - set the netdev feature flags
11456 * @netdev: ptr to the netdev being adjusted
11457 * @features: the feature set that the stack is suggesting
373149fc 11458 * Note: expects to be called while under rtnl_lock()
41c445ff
JB
11459 **/
11460static int i40e_set_features(struct net_device *netdev,
11461 netdev_features_t features)
11462{
11463 struct i40e_netdev_priv *np = netdev_priv(netdev);
11464 struct i40e_vsi *vsi = np->vsi;
7c3c288b
ASJ
11465 struct i40e_pf *pf = vsi->back;
11466 bool need_reset;
41c445ff 11467
d8ec9864
AB
11468 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11469 i40e_pf_config_rss(pf);
11470 else if (!(features & NETIF_F_RXHASH) &&
11471 netdev->features & NETIF_F_RXHASH)
11472 i40e_clear_rss_lut(vsi);
11473
41c445ff
JB
11474 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11475 i40e_vlan_stripping_enable(vsi);
11476 else
11477 i40e_vlan_stripping_disable(vsi);
11478
2f4b411a
AN
11479 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11480 dev_err(&pf->pdev->dev,
11481 "Offloaded tc filters active, can't turn hw_tc_offload off");
11482 return -EINVAL;
11483 }
11484
7c3c288b
ASJ
11485 need_reset = i40e_set_ntuple(pf, features);
11486
11487 if (need_reset)
ff424188 11488 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7c3c288b 11489
41c445ff
JB
11490 return 0;
11491}
11492
a1c9a9d9 11493/**
6a899024 11494 * i40e_get_udp_port_idx - Look up a possibly offloaded UDP port for Rx
a1c9a9d9
JK
11495 * @pf: board private structure
11496 * @port: The UDP port to look up
11497 *
11498 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11499 **/
fe0b0cd9 11500static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
a1c9a9d9
JK
11501{
11502 u8 i;
11503
11504 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5305d0fe
AD
11505 /* Do not report ports with pending deletions as
11506 * being available.
11507 */
11508 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11509 continue;
27826fd5 11510 if (pf->udp_ports[i].port == port)
a1c9a9d9
JK
11511 return i;
11512 }
11513
11514 return i;
11515}
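/* Example: with ports[] = { 4789, 0, 6081, 0, ... } (the default VXLAN
 * and GENEVE ports shown for illustration), i40e_get_udp_port_idx(pf, 6081)
 * returns 2, i40e_get_udp_port_idx(pf, 0) returns the first reusable slot
 * with no deletion pending, and a miss returns the sentinel
 * I40E_MAX_PF_UDP_OFFLOAD_PORTS.
 */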
11516
11517/**
06a5f7f1 11518 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
a1c9a9d9 11519 * @netdev: This physical port's netdev
06a5f7f1 11520 * @ti: Tunnel endpoint information
a1c9a9d9 11521 **/
06a5f7f1
AD
11522static void i40e_udp_tunnel_add(struct net_device *netdev,
11523 struct udp_tunnel_info *ti)
a1c9a9d9
JK
11524{
11525 struct i40e_netdev_priv *np = netdev_priv(netdev);
11526 struct i40e_vsi *vsi = np->vsi;
11527 struct i40e_pf *pf = vsi->back;
fe0b0cd9 11528 u16 port = ntohs(ti->port);
a1c9a9d9
JK
11529 u8 next_idx;
11530 u8 idx;
11531
6a899024 11532 idx = i40e_get_udp_port_idx(pf, port);
a1c9a9d9
JK
11533
11534 /* Check if port already exists */
11535 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
fe0b0cd9 11536 netdev_info(netdev, "port %d already offloaded\n", port);
a1c9a9d9
JK
11537 return;
11538 }
11539
11540 /* Now check if there is space to add the new port */
6a899024 11541 next_idx = i40e_get_udp_port_idx(pf, 0);
a1c9a9d9
JK
11542
11543 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
06a5f7f1 11544 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
fe0b0cd9 11545 port);
6a899024
SA
11546 return;
11547 }
11548
06a5f7f1
AD
11549 switch (ti->type) {
11550 case UDP_TUNNEL_TYPE_VXLAN:
11551 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11552 break;
11553 case UDP_TUNNEL_TYPE_GENEVE:
d36e41dc 11554 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
06a5f7f1
AD
11555 return;
11556 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11557 break;
11558 default:
6a899024
SA
11559 return;
11560 }
11561
11562 /* New port: add it and mark its index in the bitmap */
27826fd5 11563 pf->udp_ports[next_idx].port = port;
5305d0fe 11564 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
6a899024 11565 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
41898c66 11566 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
a1c9a9d9
JK
11567}
11568
6a899024 11569/**
06a5f7f1 11570 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
6a899024 11571 * @netdev: This physical port's netdev
06a5f7f1 11572 * @ti: Tunnel endpoint information
6a899024 11573 **/
06a5f7f1
AD
11574static void i40e_udp_tunnel_del(struct net_device *netdev,
11575 struct udp_tunnel_info *ti)
6a899024 11576{
6a899024
SA
11577 struct i40e_netdev_priv *np = netdev_priv(netdev);
11578 struct i40e_vsi *vsi = np->vsi;
11579 struct i40e_pf *pf = vsi->back;
fe0b0cd9 11580 u16 port = ntohs(ti->port);
6a899024
SA
11581 u8 idx;
11582
6a899024
SA
11583 idx = i40e_get_udp_port_idx(pf, port);
11584
11585 /* Check if port already exists */
06a5f7f1
AD
11586 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11587 goto not_found;
6a899024 11588
06a5f7f1
AD
11589 switch (ti->type) {
11590 case UDP_TUNNEL_TYPE_VXLAN:
11591 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11592 goto not_found;
11593 break;
11594 case UDP_TUNNEL_TYPE_GENEVE:
11595 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11596 goto not_found;
11597 break;
11598 default:
11599 goto not_found;
6a899024 11600 }
06a5f7f1
AD
11601
11602 /* if port exists, set it to 0 (mark for deletion)
11603 * and make it pending
11604 */
27826fd5 11605 pf->udp_ports[idx].port = 0;
5305d0fe
AD
11606
11607 /* Toggle pending bit instead of setting it. This way if we are
11608 * deleting a port that has yet to be added we just clear the pending
11609 * bit and don't have to worry about it.
11610 */
11611 pf->pending_udp_bitmap ^= BIT_ULL(idx);
41898c66 11612 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
06a5f7f1
AD
11613
11614 return;
11615not_found:
11616 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
fe0b0cd9 11617 port);
6a899024
SA
11618}
11619
1f224ad2 11620static int i40e_get_phys_port_id(struct net_device *netdev,
02637fce 11621 struct netdev_phys_item_id *ppid)
1f224ad2
NP
11622{
11623 struct i40e_netdev_priv *np = netdev_priv(netdev);
11624 struct i40e_pf *pf = np->vsi->back;
11625 struct i40e_hw *hw = &pf->hw;
11626
d36e41dc 11627 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
1f224ad2
NP
11628 return -EOPNOTSUPP;
11629
11630 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11631 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11632
11633 return 0;
11634}
11635
2f90ade6
JB
11636/**
11637 * i40e_ndo_fdb_add - add an entry to the hardware database
11638 * @ndm: the input from the stack
11639 * @tb: pointer to array of nladdr (unused)
11640 * @dev: the net device pointer
11641 * @addr: the MAC address entry being added
f5254429 11642 * @vid: VLAN ID
2f90ade6
JB
11643 * @flags: instructions from stack about fdb operation
11644 */
4ba0dea5
GR
11645static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11646 struct net_device *dev,
f6f6424b 11647 const unsigned char *addr, u16 vid,
87b0984e
PM
11648 u16 flags,
11649 struct netlink_ext_ack *extack)
4ba0dea5
GR
11650{
11651 struct i40e_netdev_priv *np = netdev_priv(dev);
11652 struct i40e_pf *pf = np->vsi->back;
11653 int err = 0;
11654
11655 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11656 return -EOPNOTSUPP;
11657
65891fea
OG
11658 if (vid) {
11659 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11660 return -EINVAL;
11661 }
11662
4ba0dea5
GR
11663 /* Hardware does not support aging addresses so if a
11664 * ndm_state is given only allow permanent addresses
11665 */
11666 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11667 netdev_info(dev, "FDB only supports static addresses\n");
11668 return -EINVAL;
11669 }
11670
11671 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11672 err = dev_uc_add_excl(dev, addr);
11673 else if (is_multicast_ether_addr(addr))
11674 err = dev_mc_add_excl(dev, addr);
11675 else
11676 err = -EINVAL;
11677
11678 /* Only return duplicate errors if NLM_F_EXCL is set */
11679 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11680 err = 0;
11681
11682 return err;
11683}
11684
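The tail of this function implements the usual fdb semantics: a duplicate add is only an error when the caller asked for exclusive creation via NLM_F_EXCL. The same rule in isolation (editor's sketch, not driver code):

#include <errno.h>
#include <stdio.h>

/* Swallow -EEXIST unless the request demanded exclusivity. */
static int filter_dup_error(int err, int excl_requested)
{
	if (err == -EEXIST && !excl_requested)
		return 0;
	return err;
}

int main(void)
{
	printf("%d %d\n",
	       filter_dup_error(-EEXIST, 0),	/* 0: duplicate tolerated */
	       filter_dup_error(-EEXIST, 1));	/* -EEXIST: caller wanted EXCL */
	return 0;
}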
51616018
NP
11685/**
11686 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11687 * @dev: the netdev being configured
11688 * @nlh: RTNL message
f5254429 11689 * @flags: bridge flags
2fd527b7 11690 * @extack: netlink extended ack
51616018
NP
11691 *
11692 * Inserts a new hardware bridge if not already created and
11693 * enables the bridging mode requested (VEB or VEPA). If the
11694 * hardware bridge has already been inserted and the request
11695 * is to change the mode then that requires a PF reset to
11696 * allow rebuild of the components with required hardware
11697 * bridge mode enabled.
373149fc
MS
11698 *
11699 * Note: expects to be called while under rtnl_lock()
51616018
NP
11700 **/
11701static int i40e_ndo_bridge_setlink(struct net_device *dev,
9df70b66 11702 struct nlmsghdr *nlh,
2fd527b7
PM
11703 u16 flags,
11704 struct netlink_ext_ack *extack)
51616018
NP
11705{
11706 struct i40e_netdev_priv *np = netdev_priv(dev);
11707 struct i40e_vsi *vsi = np->vsi;
11708 struct i40e_pf *pf = vsi->back;
11709 struct i40e_veb *veb = NULL;
11710 struct nlattr *attr, *br_spec;
11711 int i, rem;
11712
11713 /* Only for PF VSI for now */
11714 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11715 return -EOPNOTSUPP;
11716
11717 /* Find the HW bridge for PF VSI */
11718 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11719 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11720 veb = pf->veb[i];
11721 }
11722
11723 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11724
11725 nla_for_each_nested(attr, br_spec, rem) {
11726 __u16 mode;
11727
11728 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11729 continue;
11730
11731 mode = nla_get_u16(attr);
11732 if ((mode != BRIDGE_MODE_VEPA) &&
11733 (mode != BRIDGE_MODE_VEB))
11734 return -EINVAL;
11735
11736 /* Insert a new HW bridge */
11737 if (!veb) {
11738 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11739 vsi->tc_config.enabled_tc);
11740 if (veb) {
11741 veb->bridge_mode = mode;
11742 i40e_config_bridge_mode(veb);
11743 } else {
11744 /* No Bridge HW offload available */
11745 return -ENOENT;
11746 }
11747 break;
11748 } else if (mode != veb->bridge_mode) {
11749 /* Existing HW bridge but different mode needs reset */
11750 veb->bridge_mode = mode;
fc60861e
ASJ
11751 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11752 if (mode == BRIDGE_MODE_VEB)
11753 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11754 else
11755 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
ff424188 11756 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
51616018
NP
11757 break;
11758 }
11759 }
11760
11761 return 0;
11762}
11763
11764/**
11765 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11766 * @skb: skb buff
11767 * @pid: process id
11768 * @seq: RTNL message seq #
11769 * @dev: the netdev being configured
11770 * @filter_mask: unused
d4b2f9fe 11771 * @nlflags: netlink flags passed in
51616018
NP
11772 *
11773 * Return the mode in which the hardware bridge is operating in
11774 * i.e VEB or VEPA.
11775 **/
51616018
NP
11776static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11777 struct net_device *dev,
9f4ffc44
CW
11778 u32 __always_unused filter_mask,
11779 int nlflags)
51616018
NP
11780{
11781 struct i40e_netdev_priv *np = netdev_priv(dev);
11782 struct i40e_vsi *vsi = np->vsi;
11783 struct i40e_pf *pf = vsi->back;
11784 struct i40e_veb *veb = NULL;
11785 int i;
11786
11787 /* Only for PF VSI for now */
11788 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11789 return -EOPNOTSUPP;
11790
11791 /* Find the HW bridge for the PF VSI */
11792 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11793 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11794 veb = pf->veb[i];
11795 }
11796
11797 if (!veb)
11798 return 0;
11799
46c264da 11800 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
599b076d 11801 0, 0, nlflags, filter_mask, NULL);
51616018 11802}
51616018 11803
f44a75e2
JS
11804/**
11805 * i40e_features_check - Validate encapsulated packet conforms to limits
11806 * @skb: skb buff
2bc11c63 11807 * @dev: This physical port's netdev
f44a75e2
JS
11808 * @features: Offload features that the stack believes apply
11809 **/
11810static netdev_features_t i40e_features_check(struct sk_buff *skb,
11811 struct net_device *dev,
11812 netdev_features_t features)
11813{
f114dca2
AD
11814 size_t len;
11815
11816 /* No point in doing any of this if neither checksum nor GSO are
11817 * being requested for this frame. We can rule out both by just
11818 * checking for CHECKSUM_PARTIAL
11819 */
11820 if (skb->ip_summed != CHECKSUM_PARTIAL)
11821 return features;
11822
 11823 /* We cannot support GSO if the MSS is less than 64 bytes;
 11824 * in that case, drop GSO support.
11825 */
11826 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11827 features &= ~NETIF_F_GSO_MASK;
11828
11829 /* MACLEN can support at most 63 words */
11830 len = skb_network_header(skb) - skb->data;
11831 if (len & ~(63 * 2))
11832 goto out_err;
11833
11834 /* IPLEN and EIPLEN can support at most 127 dwords */
11835 len = skb_transport_header(skb) - skb_network_header(skb);
11836 if (len & ~(127 * 4))
11837 goto out_err;
11838
11839 if (skb->encapsulation) {
11840 /* L4TUNLEN can support 127 words */
11841 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11842 if (len & ~(127 * 2))
11843 goto out_err;
11844
11845 /* IPLEN can support at most 127 dwords */
11846 len = skb_inner_transport_header(skb) -
11847 skb_inner_network_header(skb);
11848 if (len & ~(127 * 4))
11849 goto out_err;
11850 }
11851
11852 /* No need to validate L4LEN as TCP is the only protocol with a
 11853 * flexible value and we support all possible values supported
11854 * by TCP, which is at most 15 dwords
11855 */
f44a75e2
JS
11856
11857 return features;
f114dca2
AD
11858out_err:
11859 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
f44a75e2
JS
11860}
11861
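The mask tests above encode descriptor field widths rather than arbitrary limits: MACLEN is a 6-bit count of 2-byte words, so the MAC header must be even and at most 126 bytes, and the IPLEN/EIPLEN/L4TUNLEN checks are the same idea with 7-bit word or dword counts. The MACLEN predicate as a standalone sketch (editor's illustration, not driver code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool maclen_ok(size_t len)
{
	/* zero iff len is even and <= 63 * 2 == 126 */
	return (len & ~(size_t)(63 * 2)) == 0;
}

int main(void)
{
	printf("14  -> %d\n", maclen_ok(14));	/* plain Ethernet: accepted */
	printf("15  -> %d\n", maclen_ok(15));	/* odd length: rejected */
	printf("128 -> %d\n", maclen_ok(128));	/* too long: rejected */
	return 0;
}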
0c8493d9
BT
11862/**
11863 * i40e_xdp_setup - add/remove an XDP program
11864 * @vsi: VSI to changed
11865 * @prog: XDP program
11866 **/
11867static int i40e_xdp_setup(struct i40e_vsi *vsi,
11868 struct bpf_prog *prog)
11869{
11870 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11871 struct i40e_pf *pf = vsi->back;
11872 struct bpf_prog *old_prog;
11873 bool need_reset;
11874 int i;
11875
11876 /* Don't allow frames that span over multiple buffers */
11877 if (frame_size > vsi->rx_buf_len)
11878 return -EINVAL;
11879
11880 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11881 return 0;
11882
11883 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11884 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11885
11886 if (need_reset)
11887 i40e_prep_for_reset(pf, true);
11888
11889 old_prog = xchg(&vsi->xdp_prog, prog);
11890
11891 if (need_reset)
11892 i40e_reset_and_rebuild(pf, true, true);
11893
11894 for (i = 0; i < vsi->num_queue_pairs; i++)
11895 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11896
11897 if (old_prog)
11898 bpf_prog_put(old_prog);
11899
11900 return 0;
11901}
11902
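Note that the reset is keyed off a state transition rather than the swap itself: replacing one XDP program with another leaves the rings untouched, while turning XDP on or off rebuilds them. A compact model of that decision (editor's illustration, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors: need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog) */
static bool need_reset(bool xdp_enabled, bool new_prog)
{
	return xdp_enabled != new_prog;
}

int main(void)
{
	printf("off->on  : %d\n", need_reset(false, true));	/* 1: reset */
	printf("on->swap : %d\n", need_reset(true, true));	/* 0: no reset */
	printf("on->off  : %d\n", need_reset(true, false));	/* 1: reset */
	return 0;
}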
123cecd4
BT
11903/**
11904 * i40e_enter_busy_conf - Enters busy config state
11905 * @vsi: vsi
11906 *
11907 * Returns 0 on success, <0 for failure.
11908 **/
11909static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
11910{
11911 struct i40e_pf *pf = vsi->back;
11912 int timeout = 50;
11913
11914 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
11915 timeout--;
11916 if (!timeout)
11917 return -EBUSY;
11918 usleep_range(1000, 2000);
11919 }
11920
11921 return 0;
11922}
11923
11924/**
11925 * i40e_exit_busy_conf - Exits busy config state
11926 * @vsi: vsi
11927 **/
11928static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
11929{
11930 struct i40e_pf *pf = vsi->back;
11931
11932 clear_bit(__I40E_CONFIG_BUSY, pf->state);
11933}
11934
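Taken together, these two helpers form a bounded test-and-set gate: a contender spins on __I40E_CONFIG_BUSY for at most 50 rounds of a 1-2 ms sleep (roughly 50-100 ms) before giving up with -EBUSY. A userspace analogue using C11 atomics (editor's sketch, not driver code; all names invented):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

static int enter_busy(void)
{
	int timeout = 50;

	while (atomic_flag_test_and_set(&cfg_busy)) {
		if (!--timeout)
			return -1;	/* the driver returns -EBUSY */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return 0;
}

static void exit_busy(void)
{
	atomic_flag_clear(&cfg_busy);
}

int main(void)
{
	if (!enter_busy()) {
		puts("config gate acquired");
		exit_busy();
	}
	return 0;
}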
11935/**
11936 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
11937 * @vsi: vsi
11938 * @queue_pair: queue pair
11939 **/
11940static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11941{
11942 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
11943 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
11944 memset(&vsi->tx_rings[queue_pair]->stats, 0,
11945 sizeof(vsi->tx_rings[queue_pair]->stats));
11946 if (i40e_enabled_xdp_vsi(vsi)) {
11947 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
11948 sizeof(vsi->xdp_rings[queue_pair]->stats));
11949 }
11950}
11951
11952/**
11953 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
11954 * @vsi: vsi
11955 * @queue_pair: queue pair
11956 **/
11957static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11958{
11959 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11960 if (i40e_enabled_xdp_vsi(vsi))
11961 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11962 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11963}
11964
11965/**
11966 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
11967 * @vsi: vsi
11968 * @queue_pair: queue pair
11969 * @enable: true for enable, false for disable
11970 **/
11971static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
11972 bool enable)
11973{
11974 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
11975 struct i40e_q_vector *q_vector = rxr->q_vector;
11976
11977 if (!vsi->netdev)
11978 return;
11979
11980 /* All rings in a qp belong to the same qvector. */
11981 if (q_vector->rx.ring || q_vector->tx.ring) {
11982 if (enable)
11983 napi_enable(&q_vector->napi);
11984 else
11985 napi_disable(&q_vector->napi);
11986 }
11987}
11988
11989/**
11990 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
11991 * @vsi: vsi
11992 * @queue_pair: queue pair
11993 * @enable: true for enable, false for disable
11994 *
11995 * Returns 0 on success, <0 on failure.
11996 **/
11997static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
11998 bool enable)
11999{
12000 struct i40e_pf *pf = vsi->back;
12001 int pf_q, ret = 0;
12002
12003 pf_q = vsi->base_queue + queue_pair;
12004 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12005 false /*is xdp*/, enable);
12006 if (ret) {
12007 dev_info(&pf->pdev->dev,
12008 "VSI seid %d Tx ring %d %sable timeout\n",
12009 vsi->seid, pf_q, (enable ? "en" : "dis"));
12010 return ret;
12011 }
12012
12013 i40e_control_rx_q(pf, pf_q, enable);
12014 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12015 if (ret) {
12016 dev_info(&pf->pdev->dev,
12017 "VSI seid %d Rx ring %d %sable timeout\n",
12018 vsi->seid, pf_q, (enable ? "en" : "dis"));
12019 return ret;
12020 }
12021
12022 /* Due to HW errata, on Rx disable only, the register can
 12023 * indicate done before the queue is actually disabled. Wait 50ms to be sure.
12024 */
12025 if (!enable)
12026 mdelay(50);
12027
12028 if (!i40e_enabled_xdp_vsi(vsi))
12029 return ret;
12030
12031 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12032 pf_q + vsi->alloc_queue_pairs,
12033 true /*is xdp*/, enable);
12034 if (ret) {
12035 dev_info(&pf->pdev->dev,
12036 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12037 vsi->seid, pf_q, (enable ? "en" : "dis"));
12038 }
12039
12040 return ret;
12041}
12042
12043/**
12044 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12045 * @vsi: vsi
12046 * @queue_pair: queue_pair
12047 **/
12048static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12049{
12050 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12051 struct i40e_pf *pf = vsi->back;
12052 struct i40e_hw *hw = &pf->hw;
12053
12054 /* All rings in a qp belong to the same qvector. */
12055 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12056 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12057 else
12058 i40e_irq_dynamic_enable_icr0(pf);
12059
12060 i40e_flush(hw);
12061}
12062
12063/**
12064 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12065 * @vsi: vsi
12066 * @queue_pair: queue_pair
12067 **/
12068static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12069{
12070 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12071 struct i40e_pf *pf = vsi->back;
12072 struct i40e_hw *hw = &pf->hw;
12073
12074 /* For simplicity, instead of removing the qp interrupt causes
12075 * from the interrupt linked list, we simply disable the interrupt, and
12076 * leave the list intact.
12077 *
12078 * All rings in a qp belong to the same qvector.
12079 */
12080 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12081 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12082
12083 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12084 i40e_flush(hw);
12085 synchronize_irq(pf->msix_entries[intpf].vector);
12086 } else {
12087 /* Legacy and MSI mode - this stops all interrupt handling */
12088 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12089 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12090 i40e_flush(hw);
12091 synchronize_irq(pf->pdev->irq);
12092 }
12093}
12094
12095/**
12096 * i40e_queue_pair_disable - Disables a queue pair
12097 * @vsi: vsi
12098 * @queue_pair: queue pair
12099 *
12100 * Returns 0 on success, <0 on failure.
12101 **/
12102int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12103{
12104 int err;
12105
12106 err = i40e_enter_busy_conf(vsi);
12107 if (err)
12108 return err;
12109
12110 i40e_queue_pair_disable_irq(vsi, queue_pair);
12111 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12112 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12113 i40e_queue_pair_clean_rings(vsi, queue_pair);
12114 i40e_queue_pair_reset_stats(vsi, queue_pair);
12115
12116 return err;
12117}
12118
12119/**
12120 * i40e_queue_pair_enable - Enables a queue pair
12121 * @vsi: vsi
12122 * @queue_pair: queue pair
12123 *
12124 * Returns 0 on success, <0 on failure.
12125 **/
12126int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12127{
12128 int err;
12129
12130 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12131 if (err)
12132 return err;
12133
12134 if (i40e_enabled_xdp_vsi(vsi)) {
12135 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12136 if (err)
12137 return err;
12138 }
12139
12140 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12141 if (err)
12142 return err;
12143
12144 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12145 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12146 i40e_queue_pair_enable_irq(vsi, queue_pair);
12147
12148 i40e_exit_busy_conf(vsi);
12149
12150 return err;
12151}
12152
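These two entry points are meant to bracket per-queue reconfiguration while the rest of the VSI keeps running; the AF_XDP UMEM setup path in i40e_xsk.c is a typical caller. A hypothetical caller would look like this (editor's sketch; i40e_reconfigure_queue is invented):

static int i40e_reconfigure_queue(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_queue_pair_disable(vsi, queue_pair);
	if (err)
		return err;

	/* ... swap buffers, UMEM, or ring parameters for this queue ... */

	return i40e_queue_pair_enable(vsi, queue_pair);
}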
0c8493d9 12153/**
f4e63525 12154 * i40e_xdp - implements ndo_bpf for i40e
0c8493d9
BT
12155 * @dev: netdevice
12156 * @xdp: XDP command
12157 **/
12158static int i40e_xdp(struct net_device *dev,
f4e63525 12159 struct netdev_bpf *xdp)
0c8493d9
BT
12160{
12161 struct i40e_netdev_priv *np = netdev_priv(dev);
12162 struct i40e_vsi *vsi = np->vsi;
12163
12164 if (vsi->type != I40E_VSI_MAIN)
12165 return -EINVAL;
12166
12167 switch (xdp->command) {
12168 case XDP_SETUP_PROG:
12169 return i40e_xdp_setup(vsi, xdp->prog);
12170 case XDP_QUERY_PROG:
eb23039f 12171 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
0c8493d9 12172 return 0;
0a714186
BT
12173 case XDP_QUERY_XSK_UMEM:
12174 return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
12175 xdp->xsk.queue_id);
12176 case XDP_SETUP_XSK_UMEM:
12177 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12178 xdp->xsk.queue_id);
0c8493d9
BT
12179 default:
12180 return -EINVAL;
12181 }
12182}
12183
37a2973a 12184static const struct net_device_ops i40e_netdev_ops = {
41c445ff
JB
12185 .ndo_open = i40e_open,
12186 .ndo_stop = i40e_close,
12187 .ndo_start_xmit = i40e_lan_xmit_frame,
12188 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12189 .ndo_set_rx_mode = i40e_set_rx_mode,
12190 .ndo_validate_addr = eth_validate_addr,
12191 .ndo_set_mac_address = i40e_set_mac,
12192 .ndo_change_mtu = i40e_change_mtu,
beb0dff1 12193 .ndo_do_ioctl = i40e_ioctl,
41c445ff
JB
12194 .ndo_tx_timeout = i40e_tx_timeout,
12195 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12196 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12197#ifdef CONFIG_NET_POLL_CONTROLLER
12198 .ndo_poll_controller = i40e_netpoll,
12199#endif
e4c6734e 12200 .ndo_setup_tc = __i40e_setup_tc,
41c445ff
JB
12201 .ndo_set_features = i40e_set_features,
12202 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12203 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
ed616689 12204 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
41c445ff 12205 .ndo_get_vf_config = i40e_ndo_get_vf_config,
588aefa0 12206 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
e6d9004d 12207 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
c3bbbd20 12208 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
06a5f7f1
AD
12209 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12210 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
1f224ad2 12211 .ndo_get_phys_port_id = i40e_get_phys_port_id,
4ba0dea5 12212 .ndo_fdb_add = i40e_ndo_fdb_add,
f44a75e2 12213 .ndo_features_check = i40e_features_check,
51616018
NP
12214 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12215 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
f4e63525 12216 .ndo_bpf = i40e_xdp,
d9314c47 12217 .ndo_xdp_xmit = i40e_xdp_xmit,
1328dcdd 12218 .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
41c445ff
JB
12219};
12220
12221/**
12222 * i40e_config_netdev - Setup the netdev flags
12223 * @vsi: the VSI being configured
12224 *
12225 * Returns 0 on success, negative value on failure
12226 **/
12227static int i40e_config_netdev(struct i40e_vsi *vsi)
12228{
12229 struct i40e_pf *pf = vsi->back;
12230 struct i40e_hw *hw = &pf->hw;
12231 struct i40e_netdev_priv *np;
12232 struct net_device *netdev;
435c084a 12233 u8 broadcast[ETH_ALEN];
41c445ff
JB
12234 u8 mac_addr[ETH_ALEN];
12235 int etherdev_size;
bacd75cf
PB
12236 netdev_features_t hw_enc_features;
12237 netdev_features_t hw_features;
41c445ff
JB
12238
12239 etherdev_size = sizeof(struct i40e_netdev_priv);
f8ff1464 12240 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
41c445ff
JB
12241 if (!netdev)
12242 return -ENOMEM;
12243
12244 vsi->netdev = netdev;
12245 np = netdev_priv(netdev);
12246 np->vsi = vsi;
12247
bacd75cf
PB
12248 hw_enc_features = NETIF_F_SG |
12249 NETIF_F_IP_CSUM |
12250 NETIF_F_IPV6_CSUM |
12251 NETIF_F_HIGHDMA |
12252 NETIF_F_SOFT_FEATURES |
12253 NETIF_F_TSO |
12254 NETIF_F_TSO_ECN |
12255 NETIF_F_TSO6 |
12256 NETIF_F_GSO_GRE |
12257 NETIF_F_GSO_GRE_CSUM |
12258 NETIF_F_GSO_PARTIAL |
ba766b8b
JK
12259 NETIF_F_GSO_IPXIP4 |
12260 NETIF_F_GSO_IPXIP6 |
bacd75cf
PB
12261 NETIF_F_GSO_UDP_TUNNEL |
12262 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12263 NETIF_F_SCTP_CRC |
12264 NETIF_F_RXHASH |
12265 NETIF_F_RXCSUM |
12266 0;
41c445ff 12267
d36e41dc 12268 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
1c7b4a23
AD
12269 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12270
12271 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
b0fe3306 12272
bacd75cf
PB
12273 netdev->hw_enc_features |= hw_enc_features;
12274
b0fe3306 12275 /* record features VLANs can make use of */
bacd75cf 12276 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
41c445ff 12277
bacd75cf
PB
12278 hw_features = hw_enc_features |
12279 NETIF_F_HW_VLAN_CTAG_TX |
12280 NETIF_F_HW_VLAN_CTAG_RX;
b0fe3306 12281
d5596fd4
JK
12282 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12283 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12284
bacd75cf 12285 netdev->hw_features |= hw_features;
2e86a0b6 12286
bacd75cf 12287 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
1c7b4a23 12288 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
41c445ff
JB
12289
12290 if (vsi->type == I40E_VSI_MAIN) {
12291 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9a173901 12292 ether_addr_copy(mac_addr, hw->mac.perm_addr);
41c4c2b5
JK
12293 /* The following steps are necessary for two reasons. First,
12294 * some older NVM configurations load a default MAC-VLAN
12295 * filter that will accept any tagged packet, and we want to
12296 * replace this with a normal filter. Additionally, it is
12297 * possible our MAC address was provided by the platform using
12298 * Open Firmware or similar.
12299 *
12300 * Thus, we need to remove the default filter and install one
12301 * specific to the MAC address.
1596b5dd
JK
12302 */
12303 i40e_rm_default_mac_filter(vsi, mac_addr);
278e7d0b 12304 spin_lock_bh(&vsi->mac_filter_hash_lock);
9569a9a4 12305 i40e_add_mac_filter(vsi, mac_addr);
278e7d0b 12306 spin_unlock_bh(&vsi->mac_filter_hash_lock);
41c445ff 12307 } else {
8c9eb350
JK
12308 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12309 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12310 * the end, which is 4 bytes long, so force truncation of the
12311 * original name by IFNAMSIZ - 4
12312 */
12313 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12314 IFNAMSIZ - 4,
41c445ff 12315 pf->vsi[pf->lan_vsi]->netdev->name);
6c1f0a1f 12316 eth_random_addr(mac_addr);
21659035 12317
278e7d0b 12318 spin_lock_bh(&vsi->mac_filter_hash_lock);
9569a9a4 12319 i40e_add_mac_filter(vsi, mac_addr);
278e7d0b 12320 spin_unlock_bh(&vsi->mac_filter_hash_lock);
41c445ff 12321 }
21659035 12322
435c084a
JK
12323 /* Add the broadcast filter so that we initially will receive
12324 * broadcast packets. Note that when a new VLAN is first added the
12325 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12326 * specific filters as part of transitioning into "vlan" operation.
12327 * When more VLANs are added, the driver will copy each existing MAC
12328 * filter and add it for the new VLAN.
12329 *
12330 * Broadcast filters are handled specially by
 12331 * i40e_sync_filters_subtask, as the driver must set the broadcast
12332 * promiscuous bit instead of adding this directly as a MAC/VLAN
12333 * filter. The subtask will update the correct broadcast promiscuous
12334 * bits as VLANs become active or inactive.
12335 */
12336 eth_broadcast_addr(broadcast);
12337 spin_lock_bh(&vsi->mac_filter_hash_lock);
9569a9a4 12338 i40e_add_mac_filter(vsi, broadcast);
435c084a
JK
12339 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12340
9a173901
GR
12341 ether_addr_copy(netdev->dev_addr, mac_addr);
12342 ether_addr_copy(netdev->perm_addr, mac_addr);
b0fe3306 12343
31389b53
KK
12344 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
12345 netdev->neigh_priv_len = sizeof(u32) * 4;
12346
41c445ff
JB
12347 netdev->priv_flags |= IFF_UNICAST_FLT;
12348 netdev->priv_flags |= IFF_SUPP_NOFCS;
12349 /* Setup netdev TC information */
12350 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12351
12352 netdev->netdev_ops = &i40e_netdev_ops;
12353 netdev->watchdog_timeo = 5 * HZ;
12354 i40e_set_ethtool_ops(netdev);
12355
91c527a5
JW
12356 /* MTU range: 68 - 9706 */
12357 netdev->min_mtu = ETH_MIN_MTU;
1e3a5fd5 12358 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
91c527a5 12359
41c445ff
JB
12360 return 0;
12361}
12362
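The "%.*sv%%d" format above is easy to misread: the precision argument clips the parent name to IFNAMSIZ - 4 so that the literal 'v', the digits substituted later by the net core, and the terminating NUL still fit. A standalone demonstration (editor's illustration, not driver code):

#include <stdio.h>

#define IFNAMSIZ 16	/* matches the kernel's limit */

int main(void)
{
	char name[IFNAMSIZ];

	snprintf(name, IFNAMSIZ, "%.*sv%%d",
		 IFNAMSIZ - 4, "averylongdevicename0");
	printf("template: %s\n", name);	/* prints "averylongdevv%d" */
	return 0;
}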
12363/**
12364 * i40e_vsi_delete - Delete a VSI from the switch
12365 * @vsi: the VSI being removed
12366 *
12367 * Returns 0 on success, negative value on failure
12368 **/
12369static void i40e_vsi_delete(struct i40e_vsi *vsi)
12370{
12371 /* remove default VSI is not allowed */
12372 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12373 return;
12374
41c445ff 12375 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
41c445ff
JB
12376}
12377
51616018
NP
12378/**
12379 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
12380 * @vsi: the VSI being queried
12381 *
 12382 * Returns 1 if the HW bridge mode is VEB and 0 if it is VEPA
12383 **/
12384int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12385{
12386 struct i40e_veb *veb;
12387 struct i40e_pf *pf = vsi->back;
12388
12389 /* Uplink is not a bridge so default to VEB */
12390 if (vsi->veb_idx == I40E_NO_VEB)
12391 return 1;
12392
12393 veb = pf->veb[vsi->veb_idx];
09603eaa
AA
12394 if (!veb) {
12395 dev_info(&pf->pdev->dev,
12396 "There is no veb associated with the bridge\n");
12397 return -ENOENT;
12398 }
12399
51616018 12400 /* Uplink is a bridge in VEPA mode */
09603eaa 12401 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
51616018 12402 return 0;
09603eaa
AA
12403 } else {
12404 /* Uplink is a bridge in VEB mode */
12405 return 1;
12406 }
51616018 12407
51616018
NP
12410}
12411
41c445ff
JB
12412/**
12413 * i40e_add_vsi - Add a VSI to the switch
12414 * @vsi: the VSI being configured
12415 *
12416 * This initializes a VSI context depending on the VSI type to be added and
12417 * passes it down to the add_vsi aq command.
12418 **/
12419static int i40e_add_vsi(struct i40e_vsi *vsi)
12420{
12421 int ret = -ENODEV;
41c445ff
JB
12422 struct i40e_pf *pf = vsi->back;
12423 struct i40e_hw *hw = &pf->hw;
12424 struct i40e_vsi_context ctxt;
278e7d0b
JK
12425 struct i40e_mac_filter *f;
12426 struct hlist_node *h;
12427 int bkt;
21659035 12428
41c445ff
JB
12429 u8 enabled_tc = 0x1; /* TC0 enabled */
12430 int f_count = 0;
12431
12432 memset(&ctxt, 0, sizeof(ctxt));
12433 switch (vsi->type) {
12434 case I40E_VSI_MAIN:
12435 /* The PF's main VSI is already setup as part of the
12436 * device initialization, so we'll not bother with
12437 * the add_vsi call, but we will retrieve the current
12438 * VSI context.
12439 */
12440 ctxt.seid = pf->main_vsi_seid;
12441 ctxt.pf_num = pf->hw.pf_id;
12442 ctxt.vf_num = 0;
12443 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12444 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12445 if (ret) {
12446 dev_info(&pf->pdev->dev,
f1c7e72e
SN
12447 "couldn't get PF vsi config, err %s aq_err %s\n",
12448 i40e_stat_str(&pf->hw, ret),
12449 i40e_aq_str(&pf->hw,
12450 pf->hw.aq.asq_last_status));
41c445ff
JB
12451 return -ENOENT;
12452 }
1a2f6248 12453 vsi->info = ctxt.info;
41c445ff
JB
12454 vsi->info.valid_sections = 0;
12455
12456 vsi->seid = ctxt.seid;
12457 vsi->id = ctxt.vsi_number;
12458
12459 enabled_tc = i40e_pf_get_tc_map(pf);
12460
64615b54
MW
12461 /* Source pruning is enabled by default, so the flag is
12462 * negative logic - if it's set, we need to fiddle with
12463 * the VSI to disable source pruning.
12464 */
12465 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12466 memset(&ctxt, 0, sizeof(ctxt));
12467 ctxt.seid = pf->main_vsi_seid;
12468 ctxt.pf_num = pf->hw.pf_id;
12469 ctxt.vf_num = 0;
12470 ctxt.info.valid_sections |=
12471 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12472 ctxt.info.switch_id =
12473 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12474 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12475 if (ret) {
12476 dev_info(&pf->pdev->dev,
12477 "update vsi failed, err %s aq_err %s\n",
12478 i40e_stat_str(&pf->hw, ret),
12479 i40e_aq_str(&pf->hw,
12480 pf->hw.aq.asq_last_status));
12481 ret = -ENOENT;
12482 goto err;
12483 }
12484 }
12485
41c445ff 12486 /* MFP mode setup queue map and update VSI */
63d7e5a4
NP
12487 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12488 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
41c445ff
JB
12489 memset(&ctxt, 0, sizeof(ctxt));
12490 ctxt.seid = pf->main_vsi_seid;
12491 ctxt.pf_num = pf->hw.pf_id;
12492 ctxt.vf_num = 0;
12493 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12494 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12495 if (ret) {
12496 dev_info(&pf->pdev->dev,
f1c7e72e
SN
12497 "update vsi failed, err %s aq_err %s\n",
12498 i40e_stat_str(&pf->hw, ret),
12499 i40e_aq_str(&pf->hw,
12500 pf->hw.aq.asq_last_status));
41c445ff
JB
12501 ret = -ENOENT;
12502 goto err;
12503 }
12504 /* update the local VSI info queue map */
12505 i40e_vsi_update_queue_map(vsi, &ctxt);
12506 vsi->info.valid_sections = 0;
12507 } else {
12508 /* Default/Main VSI is only enabled for TC0
12509 * reconfigure it to enable all TCs that are
12510 * available on the port in SFP mode.
63d7e5a4
NP
12511 * For MFP case the iSCSI PF would use this
12512 * flow to enable LAN+iSCSI TC.
41c445ff
JB
12513 */
12514 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12515 if (ret) {
19279235
CW
 12516 /* A single-TC configuration is not fatal, so log a
 12517 * message and continue
12518 */
41c445ff 12519 dev_info(&pf->pdev->dev,
f1c7e72e
SN
12520 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12521 enabled_tc,
12522 i40e_stat_str(&pf->hw, ret),
12523 i40e_aq_str(&pf->hw,
12524 pf->hw.aq.asq_last_status));
41c445ff
JB
12525 }
12526 }
12527 break;
12528
12529 case I40E_VSI_FDIR:
cbf61325
ASJ
12530 ctxt.pf_num = hw->pf_id;
12531 ctxt.vf_num = 0;
12532 ctxt.uplink_seid = vsi->uplink_seid;
2b18e591 12533 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
cbf61325 12534 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
fc60861e
ASJ
12535 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12536 (i40e_is_vsi_uplink_mode_veb(vsi))) {
51616018 12537 ctxt.info.valid_sections |=
fc60861e 12538 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
51616018 12539 ctxt.info.switch_id =
fc60861e 12540 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
51616018 12541 }
41c445ff 12542 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
41c445ff
JB
12543 break;
12544
12545 case I40E_VSI_VMDQ2:
12546 ctxt.pf_num = hw->pf_id;
12547 ctxt.vf_num = 0;
12548 ctxt.uplink_seid = vsi->uplink_seid;
2b18e591 12549 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
41c445ff
JB
12550 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12551
41c445ff
JB
12552 /* This VSI is connected to VEB so the switch_id
12553 * should be set to zero by default.
12554 */
51616018
NP
12555 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12556 ctxt.info.valid_sections |=
12557 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12558 ctxt.info.switch_id =
12559 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12560 }
41c445ff
JB
12561
12562 /* Setup the VSI tx/rx queue map for TC0 only for now */
12563 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12564 break;
12565
12566 case I40E_VSI_SRIOV:
12567 ctxt.pf_num = hw->pf_id;
12568 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12569 ctxt.uplink_seid = vsi->uplink_seid;
2b18e591 12570 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
41c445ff
JB
12571 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12572
41c445ff
JB
12573 /* This VSI is connected to VEB so the switch_id
12574 * should be set to zero by default.
12575 */
51616018
NP
12576 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12577 ctxt.info.valid_sections |=
12578 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12579 ctxt.info.switch_id =
12580 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12581 }
41c445ff 12582
e3219ce6
ASJ
12583 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12584 ctxt.info.valid_sections |=
12585 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12586 ctxt.info.queueing_opt_flags |=
4b28cdba
AS
12587 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12588 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
e3219ce6
ASJ
12589 }
12590
41c445ff
JB
12591 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12592 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
c674d125
MW
12593 if (pf->vf[vsi->vf_id].spoofchk) {
12594 ctxt.info.valid_sections |=
12595 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12596 ctxt.info.sec_flags |=
12597 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12598 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12599 }
41c445ff
JB
12600 /* Setup the VSI tx/rx queue map for TC0 only for now */
12601 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12602 break;
12603
e3219ce6
ASJ
12604 case I40E_VSI_IWARP:
12605 /* send down message to iWARP */
12606 break;
12607
41c445ff
JB
12608 default:
12609 return -ENODEV;
12610 }
12611
12612 if (vsi->type != I40E_VSI_MAIN) {
12613 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12614 if (ret) {
12615 dev_info(&vsi->back->pdev->dev,
f1c7e72e
SN
12616 "add vsi failed, err %s aq_err %s\n",
12617 i40e_stat_str(&pf->hw, ret),
12618 i40e_aq_str(&pf->hw,
12619 pf->hw.aq.asq_last_status));
41c445ff
JB
12620 ret = -ENOENT;
12621 goto err;
12622 }
1a2f6248 12623 vsi->info = ctxt.info;
41c445ff
JB
12624 vsi->info.valid_sections = 0;
12625 vsi->seid = ctxt.seid;
12626 vsi->id = ctxt.vsi_number;
12627 }
12628
c3c7ea27 12629 vsi->active_filters = 0;
0da36b97 12630 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
278e7d0b 12631 spin_lock_bh(&vsi->mac_filter_hash_lock);
41c445ff 12632 /* If macvlan filters already exist, force them to get loaded */
278e7d0b 12633 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
c3c7ea27 12634 f->state = I40E_FILTER_NEW;
41c445ff 12635 f_count++;
21659035 12636 }
278e7d0b 12637 spin_unlock_bh(&vsi->mac_filter_hash_lock);
30650cc5 12638
41c445ff
JB
12639 if (f_count) {
12640 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
bfe040c3 12641 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
41c445ff
JB
12642 }
12643
12644 /* Update VSI BW information */
12645 ret = i40e_vsi_get_bw_info(vsi);
12646 if (ret) {
12647 dev_info(&pf->pdev->dev,
f1c7e72e
SN
12648 "couldn't get vsi bw info, err %s aq_err %s\n",
12649 i40e_stat_str(&pf->hw, ret),
12650 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
41c445ff
JB
12651 /* VSI is already added so not tearing that up */
12652 ret = 0;
12653 }
12654
12655err:
12656 return ret;
12657}
12658
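The reload loop below leans on the kernel's generic hashtable iterator; the _safe variant keeps a temporary hlist_node so the current entry may be deleted mid-walk. The shape of the pattern with invented types (editor's sketch, not driver code):

#include <linux/hashtable.h>

struct entry {
	struct hlist_node hlist;
	int state;
};

static DEFINE_HASHTABLE(tbl, 8);	/* 256 buckets */

static int mark_all_new(void)
{
	struct hlist_node *h;
	struct entry *e;
	int bkt, count = 0;

	hash_for_each_safe(tbl, bkt, h, e, hlist) {
		e->state = 0;	/* analogous to f->state = I40E_FILTER_NEW */
		count++;
	}
	return count;
}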
12659/**
12660 * i40e_vsi_release - Delete a VSI and free its resources
12661 * @vsi: the VSI being removed
12662 *
12663 * Returns 0 on success or < 0 on error
12664 **/
12665int i40e_vsi_release(struct i40e_vsi *vsi)
12666{
278e7d0b
JK
12667 struct i40e_mac_filter *f;
12668 struct hlist_node *h;
41c445ff
JB
12669 struct i40e_veb *veb = NULL;
12670 struct i40e_pf *pf;
12671 u16 uplink_seid;
278e7d0b 12672 int i, n, bkt;
41c445ff
JB
12673
12674 pf = vsi->back;
12675
12676 /* release of a VEB-owner or last VSI is not allowed */
12677 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12678 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12679 vsi->seid, vsi->uplink_seid);
12680 return -ENODEV;
12681 }
12682 if (vsi == pf->vsi[pf->lan_vsi] &&
9e6c9c0f 12683 !test_bit(__I40E_DOWN, pf->state)) {
41c445ff
JB
12684 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12685 return -ENODEV;
12686 }
12687
12688 uplink_seid = vsi->uplink_seid;
12689 if (vsi->type != I40E_VSI_SRIOV) {
12690 if (vsi->netdev_registered) {
12691 vsi->netdev_registered = false;
12692 if (vsi->netdev) {
12693 /* results in a call to i40e_close() */
12694 unregister_netdev(vsi->netdev);
41c445ff
JB
12695 }
12696 } else {
90ef8d47 12697 i40e_vsi_close(vsi);
41c445ff
JB
12698 }
12699 i40e_vsi_disable_irq(vsi);
12700 }
12701
278e7d0b 12702 spin_lock_bh(&vsi->mac_filter_hash_lock);
6622f5cd
JK
12703
12704 /* clear the sync flag on all filters */
12705 if (vsi->netdev) {
12706 __dev_uc_unsync(vsi->netdev, NULL);
12707 __dev_mc_unsync(vsi->netdev, NULL);
12708 }
12709
12710 /* make sure any remaining filters are marked for deletion */
278e7d0b 12711 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
290d2557 12712 __i40e_del_filter(vsi, f);
6622f5cd 12713
278e7d0b 12714 spin_unlock_bh(&vsi->mac_filter_hash_lock);
21659035 12715
17652c63 12716 i40e_sync_vsi_filters(vsi);
41c445ff
JB
12717
12718 i40e_vsi_delete(vsi);
12719 i40e_vsi_free_q_vectors(vsi);
a4866597
SN
12720 if (vsi->netdev) {
12721 free_netdev(vsi->netdev);
12722 vsi->netdev = NULL;
12723 }
41c445ff
JB
12724 i40e_vsi_clear_rings(vsi);
12725 i40e_vsi_clear(vsi);
12726
12727 /* If this was the last thing on the VEB, except for the
12728 * controlling VSI, remove the VEB, which puts the controlling
12729 * VSI onto the next level down in the switch.
12730 *
12731 * Well, okay, there's one more exception here: don't remove
12732 * the orphan VEBs yet. We'll wait for an explicit remove request
12733 * from up the network stack.
12734 */
505682cd 12735 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
12736 if (pf->vsi[i] &&
12737 pf->vsi[i]->uplink_seid == uplink_seid &&
12738 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12739 n++; /* count the VSIs */
12740 }
12741 }
12742 for (i = 0; i < I40E_MAX_VEB; i++) {
12743 if (!pf->veb[i])
12744 continue;
12745 if (pf->veb[i]->uplink_seid == uplink_seid)
12746 n++; /* count the VEBs */
12747 if (pf->veb[i]->seid == uplink_seid)
12748 veb = pf->veb[i];
12749 }
12750 if (n == 0 && veb && veb->uplink_seid != 0)
12751 i40e_veb_release(veb);
12752
12753 return 0;
12754}
12755
12756/**
12757 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12758 * @vsi: ptr to the VSI
12759 *
12760 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12761 * corresponding SW VSI structure and initializes num_queue_pairs for the
12762 * newly allocated VSI.
12763 *
12764 * Returns 0 on success or negative on failure
12765 **/
12766static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12767{
12768 int ret = -ENOENT;
12769 struct i40e_pf *pf = vsi->back;
12770
493fb300 12771 if (vsi->q_vectors[0]) {
41c445ff
JB
12772 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12773 vsi->seid);
12774 return -EEXIST;
12775 }
12776
12777 if (vsi->base_vector) {
f29eaa3d 12778 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
41c445ff
JB
12779 vsi->seid, vsi->base_vector);
12780 return -EEXIST;
12781 }
12782
90e04070 12783 ret = i40e_vsi_alloc_q_vectors(vsi);
41c445ff
JB
12784 if (ret) {
12785 dev_info(&pf->pdev->dev,
12786 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12787 vsi->num_q_vectors, vsi->seid, ret);
12788 vsi->num_q_vectors = 0;
12789 goto vector_setup_out;
12790 }
12791
26cdc443
ASJ
12792 /* In Legacy mode, we do not have to get any other vector since we
12793 * piggyback on the misc/ICR0 for queue interrupts.
12794 */
12795 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12796 return ret;
958a3e3b
SN
12797 if (vsi->num_q_vectors)
12798 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12799 vsi->num_q_vectors, vsi->idx);
41c445ff
JB
12800 if (vsi->base_vector < 0) {
12801 dev_info(&pf->pdev->dev,
049a2be8
SN
12802 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12803 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
41c445ff
JB
12804 i40e_vsi_free_q_vectors(vsi);
12805 ret = -ENOENT;
12806 goto vector_setup_out;
12807 }
12808
12809vector_setup_out:
12810 return ret;
12811}
12812
bc7d338f
ASJ
12813/**
 12814 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
12815 * @vsi: pointer to the vsi.
12816 *
12817 * This re-allocates a vsi's queue resources.
12818 *
12819 * Returns pointer to the successfully allocated and configured VSI sw struct
12820 * on success, otherwise returns NULL on failure.
12821 **/
12822static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12823{
74608d17 12824 u16 alloc_queue_pairs;
f534039d 12825 struct i40e_pf *pf;
bc7d338f
ASJ
12826 u8 enabled_tc;
12827 int ret;
12828
f534039d
JU
12829 if (!vsi)
12830 return NULL;
12831
12832 pf = vsi->back;
12833
bc7d338f
ASJ
12834 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12835 i40e_vsi_clear_rings(vsi);
12836
12837 i40e_vsi_free_arrays(vsi, false);
12838 i40e_set_num_rings_in_vsi(vsi);
12839 ret = i40e_vsi_alloc_arrays(vsi, false);
12840 if (ret)
12841 goto err_vsi;
12842
74608d17
BT
12843 alloc_queue_pairs = vsi->alloc_queue_pairs *
12844 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12845
12846 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
bc7d338f 12847 if (ret < 0) {
049a2be8 12848 dev_info(&pf->pdev->dev,
f1c7e72e 12849 "failed to get tracking for %d queues for VSI %d err %d\n",
74608d17 12850 alloc_queue_pairs, vsi->seid, ret);
bc7d338f
ASJ
12851 goto err_vsi;
12852 }
12853 vsi->base_queue = ret;
12854
12855 /* Update the FW view of the VSI. Force a reset of TC and queue
12856 * layout configurations.
12857 */
12858 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12859 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12860 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12861 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
1596b5dd
JK
12862 if (vsi->type == I40E_VSI_MAIN)
12863 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
bc7d338f
ASJ
12864
12865 /* assign it some queues */
12866 ret = i40e_alloc_rings(vsi);
12867 if (ret)
12868 goto err_rings;
12869
12870 /* map all of the rings to the q_vectors */
12871 i40e_vsi_map_rings_to_vectors(vsi);
12872 return vsi;
12873
12874err_rings:
12875 i40e_vsi_free_q_vectors(vsi);
12876 if (vsi->netdev_registered) {
12877 vsi->netdev_registered = false;
12878 unregister_netdev(vsi->netdev);
12879 free_netdev(vsi->netdev);
12880 vsi->netdev = NULL;
12881 }
12882 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12883err_vsi:
12884 i40e_vsi_clear(vsi);
12885 return NULL;
12886}
12887
41c445ff
JB
12888/**
12889 * i40e_vsi_setup - Set up a VSI by a given type
12890 * @pf: board private structure
12891 * @type: VSI type
12892 * @uplink_seid: the switch element to link to
12893 * @param1: usage depends upon VSI type. For VF types, indicates VF id
12894 *
 12895 * This allocates the sw VSI structure and its queue resources, then adds a VSI
12896 * to the identified VEB.
12897 *
 12898 * Returns pointer to the successfully allocated and configured VSI sw struct on
12899 * success, otherwise returns NULL on failure.
12900 **/
12901struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12902 u16 uplink_seid, u32 param1)
12903{
12904 struct i40e_vsi *vsi = NULL;
12905 struct i40e_veb *veb = NULL;
74608d17 12906 u16 alloc_queue_pairs;
41c445ff
JB
12907 int ret, i;
12908 int v_idx;
12909
12910 /* The requested uplink_seid must be either
12911 * - the PF's port seid
12912 * no VEB is needed because this is the PF
12913 * or this is a Flow Director special case VSI
12914 * - seid of an existing VEB
12915 * - seid of a VSI that owns an existing VEB
12916 * - seid of a VSI that doesn't own a VEB
12917 * a new VEB is created and the VSI becomes the owner
12918 * - seid of the PF VSI, which is what creates the first VEB
12919 * this is a special case of the previous
12920 *
12921 * Find which uplink_seid we were given and create a new VEB if needed
12922 */
12923 for (i = 0; i < I40E_MAX_VEB; i++) {
12924 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12925 veb = pf->veb[i];
12926 break;
12927 }
12928 }
12929
12930 if (!veb && uplink_seid != pf->mac_seid) {
12931
505682cd 12932 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
12933 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12934 vsi = pf->vsi[i];
12935 break;
12936 }
12937 }
12938 if (!vsi) {
12939 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12940 uplink_seid);
12941 return NULL;
12942 }
12943
12944 if (vsi->uplink_seid == pf->mac_seid)
12945 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12946 vsi->tc_config.enabled_tc);
12947 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12948 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12949 vsi->tc_config.enabled_tc);
79c21a82
ASJ
12950 if (veb) {
12951 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12952 dev_info(&vsi->back->pdev->dev,
fb43201f 12953 "New VSI creation error, uplink seid of LAN VSI expected.\n");
79c21a82
ASJ
12954 return NULL;
12955 }
fa11cb3d
ASJ
12956 /* We come up by default in VEPA mode if SRIOV is not
12957 * already enabled, in which case we can't force VEPA
12958 * mode.
12959 */
12960 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12961 veb->bridge_mode = BRIDGE_MODE_VEPA;
12962 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12963 }
51616018 12964 i40e_config_bridge_mode(veb);
79c21a82 12965 }
41c445ff
JB
12966 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12967 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12968 veb = pf->veb[i];
12969 }
12970 if (!veb) {
12971 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12972 return NULL;
12973 }
12974
12975 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12976 uplink_seid = veb->seid;
12977 }
12978
12979 /* get vsi sw struct */
12980 v_idx = i40e_vsi_mem_alloc(pf, type);
12981 if (v_idx < 0)
12982 goto err_alloc;
12983 vsi = pf->vsi[v_idx];
cbf61325
ASJ
12984 if (!vsi)
12985 goto err_alloc;
41c445ff
JB
12986 vsi->type = type;
12987 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12988
12989 if (type == I40E_VSI_MAIN)
12990 pf->lan_vsi = v_idx;
12991 else if (type == I40E_VSI_SRIOV)
12992 vsi->vf_id = param1;
12993 /* assign it some queues */
74608d17
BT
12994 alloc_queue_pairs = vsi->alloc_queue_pairs *
12995 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12996
12997 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
41c445ff 12998 if (ret < 0) {
049a2be8
SN
12999 dev_info(&pf->pdev->dev,
13000 "failed to get tracking for %d queues for VSI %d err=%d\n",
74608d17 13001 alloc_queue_pairs, vsi->seid, ret);
41c445ff
JB
13002 goto err_vsi;
13003 }
13004 vsi->base_queue = ret;
13005
13006 /* get a VSI from the hardware */
13007 vsi->uplink_seid = uplink_seid;
13008 ret = i40e_add_vsi(vsi);
13009 if (ret)
13010 goto err_vsi;
13011
13012 switch (vsi->type) {
13013 /* setup the netdev if needed */
13014 case I40E_VSI_MAIN:
13015 case I40E_VSI_VMDQ2:
13016 ret = i40e_config_netdev(vsi);
13017 if (ret)
13018 goto err_netdev;
13019 ret = register_netdev(vsi->netdev);
13020 if (ret)
13021 goto err_netdev;
13022 vsi->netdev_registered = true;
13023 netif_carrier_off(vsi->netdev);
4e3b35b0
NP
13024#ifdef CONFIG_I40E_DCB
13025 /* Setup DCB netlink interface */
13026 i40e_dcbnl_setup(vsi);
13027#endif /* CONFIG_I40E_DCB */
41c445ff
JB
13028 /* fall through */
13029
13030 case I40E_VSI_FDIR:
13031 /* set up vectors and rings if needed */
13032 ret = i40e_vsi_setup_vectors(vsi);
13033 if (ret)
13034 goto err_msix;
13035
13036 ret = i40e_alloc_rings(vsi);
13037 if (ret)
13038 goto err_rings;
13039
13040 /* map all of the rings to the q_vectors */
13041 i40e_vsi_map_rings_to_vectors(vsi);
13042
13043 i40e_vsi_reset_stats(vsi);
13044 break;
13045
13046 default:
13047 /* no netdev or rings for the other VSI types */
13048 break;
13049 }
13050
d36e41dc 13051 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
e25d00b8
ASJ
13052 (vsi->type == I40E_VSI_VMDQ2)) {
13053 ret = i40e_vsi_config_rss(vsi);
13054 }
41c445ff
JB
13055 return vsi;
13056
13057err_rings:
13058 i40e_vsi_free_q_vectors(vsi);
13059err_msix:
13060 if (vsi->netdev_registered) {
13061 vsi->netdev_registered = false;
13062 unregister_netdev(vsi->netdev);
13063 free_netdev(vsi->netdev);
13064 vsi->netdev = NULL;
13065 }
13066err_netdev:
13067 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13068err_vsi:
13069 i40e_vsi_clear(vsi);
13070err_alloc:
13071 return NULL;
13072}
13073
13074/**
13075 * i40e_veb_get_bw_info - Query VEB BW information
13076 * @veb: the veb to query
13077 *
13078 * Query the Tx scheduler BW configuration data for given VEB
13079 **/
13080static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13081{
13082 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13083 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13084 struct i40e_pf *pf = veb->pf;
13085 struct i40e_hw *hw = &pf->hw;
13086 u32 tc_bw_max;
13087 int ret = 0;
13088 int i;
13089
13090 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13091 &bw_data, NULL);
13092 if (ret) {
13093 dev_info(&pf->pdev->dev,
f1c7e72e
SN
13094 "query veb bw config failed, err %s aq_err %s\n",
13095 i40e_stat_str(&pf->hw, ret),
13096 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
41c445ff
JB
13097 goto out;
13098 }
13099
13100 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13101 &ets_data, NULL);
13102 if (ret) {
13103 dev_info(&pf->pdev->dev,
f1c7e72e
SN
13104 "query veb bw ets config failed, err %s aq_err %s\n",
13105 i40e_stat_str(&pf->hw, ret),
13106 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
41c445ff
JB
13107 goto out;
13108 }
13109
13110 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13111 veb->bw_max_quanta = ets_data.tc_bw_max;
13112 veb->is_abs_credits = bw_data.absolute_credits_enable;
23cd1f09 13113 veb->enabled_tc = ets_data.tc_valid_bits;
41c445ff
JB
13114 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13115 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13116 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13117 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13118 veb->bw_tc_limit_credits[i] =
13119 le16_to_cpu(bw_data.tc_bw_limits[i]);
13120 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13121 }
13122
13123out:
13124 return ret;
13125}
13126
13127/**
13128 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13129 * @pf: board private structure
13130 *
13131 * On error: returns error code (negative)
13132 * On success: returns vsi index in PF (positive)
13133 **/
13134static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13135{
13136 int ret = -ENOENT;
13137 struct i40e_veb *veb;
13138 int i;
13139
13140 /* Need to protect the allocation of switch elements at the PF level */
13141 mutex_lock(&pf->switch_mutex);
13142
13143 /* VEB list may be fragmented if VEB creation/destruction has
13144 * been happening. We can afford to do a quick scan to look
13145 * for any free slots in the list.
13146 *
13147 * find next empty veb slot, looping back around if necessary
13148 */
13149 i = 0;
13150 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13151 i++;
13152 if (i >= I40E_MAX_VEB) {
13153 ret = -ENOMEM;
13154 goto err_alloc_veb; /* out of VEB slots! */
13155 }
13156
13157 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13158 if (!veb) {
13159 ret = -ENOMEM;
13160 goto err_alloc_veb;
13161 }
13162 veb->pf = pf;
13163 veb->idx = i;
13164 veb->enabled_tc = 1;
13165
13166 pf->veb[i] = veb;
13167 ret = i;
13168err_alloc_veb:
13169 mutex_unlock(&pf->switch_mutex);
13170 return ret;
13171}
13172
13173/**
13174 * i40e_switch_branch_release - Delete a branch of the switch tree
13175 * @branch: where to start deleting
13176 *
13177 * This uses recursion to find the tips of the branch to be
13178 * removed, deleting until we get back to and can delete this VEB.
13179 **/
13180static void i40e_switch_branch_release(struct i40e_veb *branch)
13181{
13182 struct i40e_pf *pf = branch->pf;
13183 u16 branch_seid = branch->seid;
13184 u16 veb_idx = branch->idx;
13185 int i;
13186
13187 /* release any VEBs on this VEB - RECURSION */
13188 for (i = 0; i < I40E_MAX_VEB; i++) {
13189 if (!pf->veb[i])
13190 continue;
13191 if (pf->veb[i]->uplink_seid == branch->seid)
13192 i40e_switch_branch_release(pf->veb[i]);
13193 }
13194
13195 /* Release the VSIs on this VEB, but not the owner VSI.
13196 *
13197 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13198 * the VEB itself, so don't use (*branch) after this loop.
13199 */
505682cd 13200 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
13201 if (!pf->vsi[i])
13202 continue;
13203 if (pf->vsi[i]->uplink_seid == branch_seid &&
13204 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13205 i40e_vsi_release(pf->vsi[i]);
13206 }
13207 }
13208
13209 /* There's one corner case where the VEB might not have been
13210 * removed, so double check it here and remove it if needed.
13211 * This case happens if the veb was created from the debugfs
13212 * commands and no VSIs were added to it.
13213 */
13214 if (pf->veb[veb_idx])
13215 i40e_veb_release(pf->veb[veb_idx]);
13216}
13217
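The recursion here is a post-order walk: every VEB hanging off this one is released first, then the VSIs, and finally the VEB itself. The same shape over an invented tree type (editor's sketch, not driver code):

#include <stdlib.h>

struct node {
	struct node *child[4];
};

static void release_branch(struct node *n)
{
	int i;

	if (!n)
		return;
	for (i = 0; i < 4; i++)		/* children before the node itself */
		release_branch(n->child[i]);
	free(n);
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));

	root->child[0] = calloc(1, sizeof(*root));
	release_branch(root);		/* frees the child, then root */
	return 0;
}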
13218/**
13219 * i40e_veb_clear - remove veb struct
13220 * @veb: the veb to remove
13221 **/
13222static void i40e_veb_clear(struct i40e_veb *veb)
13223{
13224 if (!veb)
13225 return;
13226
13227 if (veb->pf) {
13228 struct i40e_pf *pf = veb->pf;
13229
13230 mutex_lock(&pf->switch_mutex);
13231 if (pf->veb[veb->idx] == veb)
13232 pf->veb[veb->idx] = NULL;
13233 mutex_unlock(&pf->switch_mutex);
13234 }
13235
13236 kfree(veb);
13237}
13238
13239/**
13240 * i40e_veb_release - Delete a VEB and free its resources
13241 * @veb: the VEB being removed
13242 **/
13243void i40e_veb_release(struct i40e_veb *veb)
13244{
13245 struct i40e_vsi *vsi = NULL;
13246 struct i40e_pf *pf;
13247 int i, n = 0;
13248
13249 pf = veb->pf;
13250
13251 /* find the remaining VSI and check for extras */
505682cd 13252 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
13253 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13254 n++;
13255 vsi = pf->vsi[i];
13256 }
13257 }
13258 if (n != 1) {
13259 dev_info(&pf->pdev->dev,
13260 "can't remove VEB %d with %d VSIs left\n",
13261 veb->seid, n);
13262 return;
13263 }
13264
13265 /* move the remaining VSI to uplink veb */
13266 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13267 if (veb->uplink_seid) {
13268 vsi->uplink_seid = veb->uplink_seid;
13269 if (veb->uplink_seid == pf->mac_seid)
13270 vsi->veb_idx = I40E_NO_VEB;
13271 else
13272 vsi->veb_idx = veb->veb_idx;
13273 } else {
13274 /* floating VEB */
13275 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13276 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13277 }
13278
13279 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13280 i40e_veb_clear(veb);
41c445ff
JB
13281}
13282
13283/**
13284 * i40e_add_veb - create the VEB in the switch
13285 * @veb: the VEB to be instantiated
13286 * @vsi: the controlling VSI
13287 **/
13288static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13289{
f1c7e72e 13290 struct i40e_pf *pf = veb->pf;
66fc360a 13291 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
41c445ff
JB
13292 int ret;
13293
f1c7e72e 13294 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
5bc16031 13295 veb->enabled_tc, false,
66fc360a 13296 &veb->seid, enable_stats, NULL);
5bc16031
MW
13297
13298 /* get a VEB from the hardware */
41c445ff 13299 if (ret) {
f1c7e72e
SN
13300 dev_info(&pf->pdev->dev,
13301 "couldn't add VEB, err %s aq_err %s\n",
13302 i40e_stat_str(&pf->hw, ret),
13303 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
41c445ff
JB
13304 return -EPERM;
13305 }
13306
13307 /* get statistics counter */
f1c7e72e 13308 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
41c445ff
JB
13309 &veb->stats_idx, NULL, NULL, NULL);
13310 if (ret) {
f1c7e72e
SN
13311 dev_info(&pf->pdev->dev,
13312 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13313 i40e_stat_str(&pf->hw, ret),
13314 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
41c445ff
JB
13315 return -EPERM;
13316 }
13317 ret = i40e_veb_get_bw_info(veb);
13318 if (ret) {
f1c7e72e
SN
13319 dev_info(&pf->pdev->dev,
13320 "couldn't get VEB bw info, err %s aq_err %s\n",
13321 i40e_stat_str(&pf->hw, ret),
13322 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13323 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
41c445ff
JB
13324 return -ENOENT;
13325 }
13326
13327 vsi->uplink_seid = veb->seid;
13328 vsi->veb_idx = veb->idx;
13329 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13330
13331 return 0;
13332}
13333
13334/**
13335 * i40e_veb_setup - Set up a VEB
13336 * @pf: board private structure
13337 * @flags: VEB setup flags
13338 * @uplink_seid: the switch element to link to
13339 * @vsi_seid: the initial VSI seid
13340 * @enabled_tc: Enabled TC bit-map
13341 *
13342 * This allocates the sw VEB structure and links it into the switch
13343 * It is possible and legal for this to be a duplicate of an already
13344 * existing VEB. It is also possible for both uplink and vsi seids
13345 * to be zero, in order to create a floating VEB.
13346 *
13347 * Returns pointer to the successfully allocated VEB sw struct on
13348 * success, otherwise returns NULL on failure.
13349 **/
13350struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13351 u16 uplink_seid, u16 vsi_seid,
13352 u8 enabled_tc)
13353{
13354 struct i40e_veb *veb, *uplink_veb = NULL;
13355 int vsi_idx, veb_idx;
13356 int ret;
13357
13358 /* if one seid is 0, the other must be 0 to create a floating relay */
13359 if ((uplink_seid == 0 || vsi_seid == 0) &&
13360 (uplink_seid + vsi_seid != 0)) {
13361 dev_info(&pf->pdev->dev,
13362 "one, not both seid's are 0: uplink=%d vsi=%d\n",
13363 uplink_seid, vsi_seid);
13364 return NULL;
13365 }
13366
13367 /* make sure there is such a vsi and uplink */
505682cd 13368 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
41c445ff
JB
13369 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13370 break;
cfe39699 13371 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
41c445ff
JB
13372 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13373 vsi_seid);
13374 return NULL;
13375 }
13376
13377 if (uplink_seid && uplink_seid != pf->mac_seid) {
13378 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13379 if (pf->veb[veb_idx] &&
13380 pf->veb[veb_idx]->seid == uplink_seid) {
13381 uplink_veb = pf->veb[veb_idx];
13382 break;
13383 }
13384 }
13385 if (!uplink_veb) {
13386 dev_info(&pf->pdev->dev,
13387 "uplink seid %d not found\n", uplink_seid);
13388 return NULL;
13389 }
13390 }
13391
13392 /* get veb sw struct */
13393 veb_idx = i40e_veb_mem_alloc(pf);
13394 if (veb_idx < 0)
13395 goto err_alloc;
13396 veb = pf->veb[veb_idx];
13397 veb->flags = flags;
13398 veb->uplink_seid = uplink_seid;
13399 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
13400 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
13401
13402 /* create the VEB in the switch */
13403 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
13404 if (ret)
13405 goto err_veb;
1bb8b935
SN
13406 if (vsi_idx == pf->lan_vsi)
13407 pf->lan_veb = veb->idx;
41c445ff
JB
13408
13409 return veb;
13410
13411err_veb:
13412 i40e_veb_clear(veb);
13413err_alloc:
13414 return NULL;
13415}
13416
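/* Illustrative (hypothetical) calls, not taken verbatim from this file:
 * the common case hangs a VEB off the MAC uplink for a given VSI,
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *
 * while passing 0 for both uplink_seid and vsi_seid would request a
 * floating VEB with no uplink.
 */
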
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * Helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

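/* Note on the VEB case in i40e_setup_pf_switch_element() above: an
 * element whose uplink is the MAC SEID is treated as the main LAN VEB;
 * the driver either adopts the sw struct already tracking that SEID or
 * allocates an empty slot, then records seid/uplink/pf in
 * pf->veb[pf->lan_veb].
 */
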
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}

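/* Note on the do/while loop above: the firmware returns the switch
 * configuration in chunks of at most I40E_AQ_LARGE_BUF bytes; next_seid
 * carries the continuation cookie between i40e_aq_get_switch_config()
 * calls and reads zero once the final chunk has been delivered.
 */
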
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: true if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}

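/* Note on i40e_setup_pf_switch() above: the switch-wide promiscuous
 * configuration is only programmed from PF 0 since it applies to the
 * whole device, and an I40E_AQ_RC_ESRCH response is deliberately
 * treated as non-fatal so setup can continue.
 */
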
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}

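/* Worked example for i40e_determine_queue_usage() above (illustrative
 * numbers only): with MSI-X enabled, num_tx_qp = 128, rss_size_max = 64,
 * 16 online CPUs and 129 MSI-X vectors, q_max = max(64, 16) = 64, then
 * min(64, 128) = 64 and min(64, 129) = 64, so num_lan_qps = 64. One of
 * the remaining 64 queues is reserved for the Flow Director sideband,
 * leaving 63 to be carved up between VF and VMDq queue pools.
 */
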
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

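/* Note on the settings above: enable_fdir is only set when either the
 * sideband or ATR flag survived i40e_determine_queue_usage(), so a PF
 * that lost its Flow Director queue also leaves the FD filter control
 * disabled.
 */
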
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

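/* Illustrative example of the summary line this produces (values are
 * made up):
 *
 *	i40e 0000:01:00.0: Features: PF-id[0] VFs: 32 VSIs: 66 QP: 64
 *	RSS FD_ATR FD_SB NTUPLE DCB VxLAN Geneve PTP VEPA
 */
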
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}

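/* Per-branch summary of i40e_set_fec_in_flags() above (when several
 * fec_cfg bits are set, later checks win):
 *
 *	I40E_AQ_SET_FEC_AUTO                 -> RS_FEC | BASE_R_FEC
 *	..._REQUEST_RS or ..._ABILITY_RS     -> RS_FEC only
 *	..._REQUEST_KR or ..._ABILITY_KR     -> BASE_R_FEC only
 *	fec_cfg == 0                         -> both flags cleared
 */
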
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

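	/* Note on the fallback above: a 64-bit DMA mask is preferred, and
	 * only if the platform rejects it do we drop to 32-bit addressing;
	 * failing both is fatal for probe since the device could not DMA
	 * at all in that case.
	 */
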
	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

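	/* The one-shot CORER above only fires on rev 0 parts still in PXE
	 * mode; either way, the i40e_clear_hw()/i40e_pf_reset() pair below
	 * brings the function to a known-clean state before the admin
	 * queue is sized and brought up.
	 */
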
	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
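	/* With the interrupt scheme, VSI tracking array and (when VFs are
	 * already active) VEB mode prepared above, build the software view
	 * of the HW switch; the second argument is false because this is
	 * first-time setup, not a re-init of the main VSI.
	 */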
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

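	/* Illustrative example of the log line above: a device in a Gen3 x8
	 * slot would print "PCI-Express: Speed 8.0GT/s Width x8", while
	 * anything slower or narrower also emits the two bandwidth
	 * warnings.
	 */
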
	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

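/* Note on the unwind ladder at the end of i40e_probe() above: the err_*
 * labels run in reverse order of the setup steps, so a failure at any
 * point releases exactly what has been acquired so far (interrupt
 * scheme, service timer, HMC, ioremap, PCI regions) and nothing more.
 */
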
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

14549
14550/**
14551 * i40e_pci_error_detected - warning that something funky happened in PCI land
14552 * @pdev: PCI device information struct
f5254429 14553 * @error: the type of PCI error
41c445ff
JB
14554 *
14555 * Called to warn that something happened and the error handling steps
14556 * are in progress. Allows the driver to quiesce things, be ready for
14557 * remediation.
14558 **/
14559static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14560 enum pci_channel_state error)
14561{
14562 struct i40e_pf *pf = pci_get_drvdata(pdev);
14563
14564 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14565
edfc23ee
GP
14566 if (!pf) {
14567 dev_info(&pdev->dev,
14568 "Cannot recover - error happened during device probe\n");
14569 return PCI_ERS_RESULT_DISCONNECT;
14570 }
14571
41c445ff 14572 /* shutdown all operations */
dfc4ff64
JK
14573 if (!test_bit(__I40E_SUSPENDED, pf->state))
14574 i40e_prep_for_reset(pf, false);
41c445ff
JB
14575
14576 /* Request a slot reset */
14577 return PCI_ERS_RESULT_NEED_RESET;
14578}
14579
14580/**
14581 * i40e_pci_error_slot_reset - a PCI slot reset just happened
14582 * @pdev: PCI device information struct
14583 *
14584 * Called to find if the driver can work with the device now that
14585 * the pci slot has been reset. If a basic connection seems good
14586 * (registers are readable and have sane content) then return a
14587 * happy little PCI_ERS_RESULT_xxx.
14588 **/
14589static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14590{
14591 struct i40e_pf *pf = pci_get_drvdata(pdev);
14592 pci_ers_result_t result;
41c445ff
JB
14593 u32 reg;
14594
fb43201f 14595 dev_dbg(&pdev->dev, "%s\n", __func__);
41c445ff
JB
14596 if (pci_enable_device_mem(pdev)) {
14597 dev_info(&pdev->dev,
14598 "Cannot re-enable PCI device after reset.\n");
14599 result = PCI_ERS_RESULT_DISCONNECT;
14600 } else {
14601 pci_set_master(pdev);
14602 pci_restore_state(pdev);
14603 pci_save_state(pdev);
14604 pci_wake_from_d3(pdev, false);
14605
14606 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14607 if (reg == 0)
14608 result = PCI_ERS_RESULT_RECOVERED;
14609 else
14610 result = PCI_ERS_RESULT_DISCONNECT;
14611 }
14612
41c445ff
JB
14613 return result;
14614}
14615
19b7960b
AB
14616/**
14617 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
14618 * @pdev: PCI device information struct
14619 */
14620static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14621{
14622 struct i40e_pf *pf = pci_get_drvdata(pdev);
14623
14624 i40e_prep_for_reset(pf, false);
14625}
14626
14627/**
14628 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
14629 * @pdev: PCI device information struct
14630 */
14631static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14632{
14633 struct i40e_pf *pf = pci_get_drvdata(pdev);
14634
14635 i40e_reset_and_rebuild(pf, false, false);
14636}
14637
41c445ff
JB
14638/**
14639 * i40e_pci_error_resume - restart operations after PCI error recovery
14640 * @pdev: PCI device information struct
14641 *
14642 * Called to allow the driver to bring things back up after PCI error
14643 * and/or reset recovery has finished.
14644 **/
14645static void i40e_pci_error_resume(struct pci_dev *pdev)
14646{
14647 struct i40e_pf *pf = pci_get_drvdata(pdev);
14648
fb43201f 14649 dev_dbg(&pdev->dev, "%s\n", __func__);
0da36b97 14650 if (test_bit(__I40E_SUSPENDED, pf->state))
9007bccd
SN
14651 return;
14652
dfc4ff64 14653 i40e_handle_reset_warning(pf, false);
9007bccd
SN
14654}
14655
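/* Note on the AER callbacks above: broadly, the PCI core calls
 * error_detected first (quiesce), then slot_reset once the link is
 * usable again, then resume (rebuild); reset_prepare/reset_done wrap
 * function-level resets in the same quiesce/rebuild pattern.
 */
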
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
		| I40E_AQC_WOL_PRESERVE_ON_PFR
		| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

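/* Note: the PM callbacks above are deliberately symmetric. Suspend
 * tears down the interrupt scheme under RTNL so hibernation does not
 * have to remap every vector back to CPU0, and resume restores the
 * scheme before the reset/rebuild brings the VSIs back; the
 * __I40E_SUSPENDED bit makes both paths idempotent.
 */
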
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);