net: aquantia: linkstate irq should be oneshot
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
        struct aq_rss_parameters *rss_params = &cfg->aq_rss;
        int i = 0;

        static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
                0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
                0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
                0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
                0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
                0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
        };

        rss_params->hash_secret_key_size = sizeof(rss_key);
        memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
        rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

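        /* Note: i & (num_rss_queues - 1) only behaves like i % num_rss_queues
         * when num_rss_queues is a power of two; aq_nic_cfg_start() rounds
         * the vector count down to a power of two before calling this helper.
         */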
        for (i = rss_params->indirection_table_size; i--;)
                rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
        struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

        cfg->tcs = AQ_CFG_TCS_DEF;

        cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

        cfg->itr = aq_itr;
        cfg->tx_itr = aq_itr_tx;
        cfg->rx_itr = aq_itr_rx;

        cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
        cfg->is_rss = AQ_CFG_IS_RSS_DEF;
        cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
        cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
        cfg->flow_control = AQ_CFG_FC_MODE;

        cfg->mtu = AQ_CFG_MTU_DEF;
        cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
        cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

        cfg->is_lro = AQ_CFG_IS_LRO_DEF;

        /* descriptors */
        cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
        cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

        /* rss rings */
        cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
        cfg->vecs = min(cfg->vecs, num_online_cpus());
        if (self->irqvecs > AQ_HW_SERVICE_IRQS)
                cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
        /* cfg->vecs should be power of 2 for RSS */
        if (cfg->vecs >= 8U)
                cfg->vecs = 8U;
        else if (cfg->vecs >= 4U)
                cfg->vecs = 4U;
        else if (cfg->vecs >= 2U)
                cfg->vecs = 2U;
        else
                cfg->vecs = 1U;

        cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

        aq_nic_rss_init(self, cfg->num_rss_queues);

        cfg->irq_type = aq_pci_func_get_irq_type(self);

        if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
            (cfg->aq_hw_caps->vecs == 1U) ||
            (cfg->vecs == 1U)) {
                cfg->is_rss = 0U;
                cfg->vecs = 1U;
        }

        /* Check if we have enough vectors allocated for
         * link status IRQ. If no - we'll know link state from
         * slower service task.
         */
        if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
                cfg->link_irq_vec = cfg->vecs;
        else
                cfg->link_irq_vec = 0;

        cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
        cfg->features = cfg->aq_hw_caps->hw_features;
        cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
        cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
        cfg->is_vlan_force_promisc = true;
}

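/* Refreshes the link state from firmware and propagates carrier and queue
 * state to the net_device. Called both from the threaded link-status ISR
 * and from the periodic service task.
 */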
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
        int err = self->aq_fw_ops->update_link_status(self->aq_hw);
        u32 fc = 0;

        if (err)
                return err;

        if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
                pr_info("%s: link change old %d new %d\n",
                        AQ_CFG_DRV_NAME, self->link_status.mbps,
                        self->aq_hw->aq_link_status.mbps);
                aq_nic_update_interrupt_moderation_settings(self);

                /* Driver has to update flow control settings on RX block
                 * on any link event.
                 * We should query FW whether it negotiated FC.
                 */
                if (self->aq_fw_ops->get_flow_control)
                        self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
                if (self->aq_hw_ops->hw_set_fc)
                        self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
        }

        self->link_status = self->aq_hw->aq_link_status;
        if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
                aq_utils_obj_set(&self->flags,
                                 AQ_NIC_FLAG_STARTED);
                aq_utils_obj_clear(&self->flags,
                                   AQ_NIC_LINK_DOWN);
                netif_carrier_on(self->ndev);
                netif_tx_wake_all_queues(self->ndev);
        }
        if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
                netif_carrier_off(self->ndev);
                netif_tx_disable(self->ndev);
                aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
        }
        return 0;
}

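/* Threaded handler for the dedicated link-status MSI-X vector. The IRQ is
 * requested with a NULL primary handler and IRQF_ONESHOT (see aq_nic_start()),
 * so the vector stays masked until this thread finishes; it is re-armed
 * explicitly via hw_irq_enable() below.
 */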
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
        struct aq_nic_s *self = private;

        if (!self)
                return IRQ_NONE;

        aq_nic_update_link_status(self);

        self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                       BIT(self->aq_nic_cfg.link_irq_vec));
        return IRQ_HANDLED;
}

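/* Background service work: scheduled by the service timer, it re-checks the
 * link state and pulls fresh hardware statistics from firmware under
 * fwreq_mutex.
 */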
static void aq_nic_service_task(struct work_struct *work)
{
        struct aq_nic_s *self = container_of(work, struct aq_nic_s,
                                             service_task);
        int err;

        if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
                return;

        err = aq_nic_update_link_status(self);
        if (err)
                return;

        mutex_lock(&self->fwreq_mutex);
        if (self->aq_fw_ops->update_stats)
                self->aq_fw_ops->update_stats(self->aq_hw);
        mutex_unlock(&self->fwreq_mutex);

        aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
        struct aq_nic_s *self = from_timer(self, t, service_timer);

        mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

        aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
        struct aq_nic_s *self = from_timer(self, t, polling_timer);
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;

        for (i = 0U, aq_vec = self->aq_vec[0];
                self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
                aq_vec_isr(i, (void *)aq_vec);

        mod_timer(&self->polling_timer, jiffies +
                  AQ_CFG_POLLING_TIMER_INTERVAL);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
        int err = 0;

        if (!self->ndev) {
                err = -EINVAL;
                goto err_exit;
        }

        err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
        if (err)
                goto err_exit;

        mutex_lock(&self->fwreq_mutex);
        err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
                                                 self->ndev->dev_addr);
        mutex_unlock(&self->fwreq_mutex);
        if (err)
                goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
        {
                static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

                ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
        }
#endif

        for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
             self->aq_vecs++) {
                self->aq_vec[self->aq_vecs] =
                    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
                if (!self->aq_vec[self->aq_vecs]) {
                        err = -ENOMEM;
                        goto err_exit;
                }
        }

        netif_carrier_off(self->ndev);

        netif_tx_disable(self->ndev);

        err = register_netdev(self->ndev);
        if (err)
                goto err_exit;

err_exit:
        return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
        const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
        struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

        self->ndev->hw_features |= aq_hw_caps->hw_features;
        self->ndev->features = aq_hw_caps->hw_features;
        self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                     NETIF_F_RXHASH | NETIF_F_SG |
                                     NETIF_F_LRO | NETIF_F_TSO;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
        self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
        self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
                        struct aq_ring_s *ring)
{
        self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
        return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        int err = 0;
        unsigned int i = 0U;

        self->power_state = AQ_HW_POWER_STATE_D0;
        mutex_lock(&self->fwreq_mutex);
        err = self->aq_hw_ops->hw_reset(self->aq_hw);
        mutex_unlock(&self->fwreq_mutex);
        if (err < 0)
                goto err_exit;

        err = self->aq_hw_ops->hw_init(self->aq_hw,
                                       aq_nic_get_ndev(self)->dev_addr);
        if (err < 0)
                goto err_exit;

        for (i = 0U, aq_vec = self->aq_vec[0];
                self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
                aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

        netif_carrier_off(self->ndev);

err_exit:
        return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        int err = 0;
        unsigned int i = 0U;

        err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
                                                     self->mc_list.ar,
                                                     self->mc_list.count);
        if (err < 0)
                goto err_exit;

        err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
                                                    self->packet_filter);
        if (err < 0)
                goto err_exit;

        for (i = 0U, aq_vec = self->aq_vec[0];
                self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
                err = aq_vec_start(aq_vec);
                if (err < 0)
                        goto err_exit;
        }

        err = self->aq_hw_ops->hw_start(self->aq_hw);
        if (err < 0)
                goto err_exit;

        err = aq_nic_update_interrupt_moderation_settings(self);
        if (err)
                goto err_exit;

        INIT_WORK(&self->service_task, aq_nic_service_task);

        timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        aq_nic_service_timer_cb(&self->service_timer);

        if (self->aq_nic_cfg.is_polling) {
                timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
                for (i = 0U, aq_vec = self->aq_vec[0];
                        self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
                        err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
                                                    aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
                        if (err < 0)
                                goto err_exit;
                }

                if (self->aq_nic_cfg.link_irq_vec) {
                        int irqvec = pci_irq_vector(self->pdev,
                                                    self->aq_nic_cfg.link_irq_vec);
                        err = request_threaded_irq(irqvec, NULL,
                                                   aq_linkstate_threaded_isr,
                                                   IRQF_SHARED | IRQF_ONESHOT,
                                                   self->ndev->name, self);
                        if (err < 0)
                                goto err_exit;
                        self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
                }

                err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                                     AQ_CFG_IRQ_MASK);
                if (err < 0)
                        goto err_exit;
        }

        err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
        if (err < 0)
                goto err_exit;

        err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
        if (err < 0)
                goto err_exit;

        netif_tx_start_all_queues(self->ndev);

err_exit:
        return err;
}

static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
                                   struct sk_buff *skb,
                                   struct aq_ring_s *ring)
{
        unsigned int ret = 0U;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int frag_count = 0U;
        unsigned int dx = ring->sw_tail;
        struct aq_ring_buff_s *first = NULL;
        struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
        bool need_context_tag = false;

        dx_buff->flags = 0U;

        if (unlikely(skb_is_gso(skb))) {
                dx_buff->mss = skb_shinfo(skb)->gso_size;
                dx_buff->is_gso = 1U;
                dx_buff->len_pkt = skb->len;
                dx_buff->len_l2 = ETH_HLEN;
                dx_buff->len_l3 = ip_hdrlen(skb);
                dx_buff->len_l4 = tcp_hdrlen(skb);
                dx_buff->eop_index = 0xffffU;
                dx_buff->is_ipv6 =
                        (ip_hdr(skb)->version == 6) ? 1U : 0U;
                need_context_tag = true;
        }

        if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
                dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
                dx_buff->len_pkt = skb->len;
                dx_buff->is_vlan = 1U;
                need_context_tag = true;
        }

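        /* GSO and VLAN offload parameters are described to the hardware in a
         * separate context descriptor, so reserve one extra ring entry here.
         */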
        if (need_context_tag) {
                dx = aq_ring_next_dx(ring, dx);
                dx_buff = &ring->buff_ring[dx];
                dx_buff->flags = 0U;
                ++ret;
        }

        dx_buff->len = skb_headlen(skb);
        dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
                                     skb->data,
                                     dx_buff->len,
                                     DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
                goto exit;

        first = dx_buff;
        dx_buff->len_pkt = skb->len;
        dx_buff->is_sop = 1U;
        dx_buff->is_mapped = 1U;
        ++ret;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
                        1U : 0U;

                if (ip_hdr(skb)->version == 4) {
                        dx_buff->is_tcp_cso =
                                (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
                                        1U : 0U;
                        dx_buff->is_udp_cso =
                                (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
                                        1U : 0U;
                } else if (ip_hdr(skb)->version == 6) {
                        dx_buff->is_tcp_cso =
                                (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
                                        1U : 0U;
                        dx_buff->is_udp_cso =
                                (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
                                        1U : 0U;
                }
        }

        for (; nr_frags--; ++frag_count) {
                unsigned int frag_len = 0U;
                unsigned int buff_offset = 0U;
                unsigned int buff_size = 0U;
                dma_addr_t frag_pa;
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

                frag_len = skb_frag_size(frag);

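                /* Fragments larger than AQ_CFG_TX_FRAME_MAX are split across
                 * several descriptors, each mapped separately.
                 */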
                while (frag_len) {
                        if (frag_len > AQ_CFG_TX_FRAME_MAX)
                                buff_size = AQ_CFG_TX_FRAME_MAX;
                        else
                                buff_size = frag_len;

                        frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
                                                   frag,
                                                   buff_offset,
                                                   buff_size,
                                                   DMA_TO_DEVICE);

                        if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
                                                       frag_pa)))
                                goto mapping_error;

                        dx = aq_ring_next_dx(ring, dx);
                        dx_buff = &ring->buff_ring[dx];

                        dx_buff->flags = 0U;
                        dx_buff->len = buff_size;
                        dx_buff->pa = frag_pa;
                        dx_buff->is_mapped = 1U;
                        dx_buff->eop_index = 0xffffU;

                        frag_len -= buff_size;
                        buff_offset += buff_size;

                        ++ret;
                }
        }

        first->eop_index = dx;
        dx_buff->is_eop = 1U;
        dx_buff->skb = skb;
        goto exit;

mapping_error:
        for (dx = ring->sw_tail;
             ret > 0;
             --ret, dx = aq_ring_next_dx(ring, dx)) {
                dx_buff = &ring->buff_ring[dx];

                if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
                        if (unlikely(dx_buff->is_sop)) {
                                dma_unmap_single(aq_nic_get_dev(self),
                                                 dx_buff->pa,
                                                 dx_buff->len,
                                                 DMA_TO_DEVICE);
                        } else {
                                dma_unmap_page(aq_nic_get_dev(self),
                                               dx_buff->pa,
                                               dx_buff->len,
                                               DMA_TO_DEVICE);
                        }
                }
        }

exit:
        return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
        struct aq_ring_s *ring = NULL;
        unsigned int frags = 0U;
        unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
        unsigned int tc = 0U;
        int err = NETDEV_TX_OK;

        frags = skb_shinfo(skb)->nr_frags + 1;

        ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

        if (frags > AQ_CFG_SKB_FRAGS_MAX) {
                dev_kfree_skb_any(skb);
                goto err_exit;
        }

        aq_ring_update_queue_state(ring);

        /* Above status update may stop the queue. Check this. */
        if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
                err = NETDEV_TX_BUSY;
                goto err_exit;
        }

        frags = aq_nic_map_skb(self, skb, ring);

        if (likely(frags)) {
                err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
                                                       ring, frags);
                if (err >= 0) {
                        ++ring->stats.tx.packets;
                        ring->stats.tx.bytes += skb->len;
                }
        } else {
                err = NETDEV_TX_BUSY;
        }

err_exit:
        return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
        return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
        int err = 0;

        err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
        if (err < 0)
                goto err_exit;

        self->packet_filter = flags;

err_exit:
        return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
        unsigned int packet_filter = self->packet_filter;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;

        self->mc_list.count = 0;
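        /* Unicast and multicast addresses share the same hardware filter
         * table (AQ_HW_MULTICAST_ADDRESS_MAX entries); on overflow fall back
         * to IFF_PROMISC / IFF_ALLMULTI respectively.
         */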
        if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_PROMISC;
        } else {
                netdev_for_each_uc_addr(ha, ndev) {
                        ether_addr_copy(self->mc_list.ar[i++], ha->addr);

                        if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
                                break;
                }
        }

        if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_ALLMULTI;
        } else {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(self->mc_list.ar[i++], ha->addr);

                        if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
                                break;
                }
        }

        if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_MULTICAST;
                self->mc_list.count = i;
                self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
                                                       self->mc_list.ar,
                                                       self->mc_list.count);
        }
        return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
        self->aq_nic_cfg.mtu = new_mtu;

        return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
        return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
        return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
        u32 *regs_buff = p;
        int err = 0;

        regs->version = 1;

        err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
                                           self->aq_nic_cfg.aq_hw_caps,
                                           regs_buff);
        if (err < 0)
                goto err_exit;

err_exit:
        return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
        return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
        unsigned int i = 0U;
        unsigned int count = 0U;
        struct aq_vec_s *aq_vec = NULL;
        struct aq_stats_s *stats;

        if (self->aq_fw_ops->update_stats) {
                mutex_lock(&self->fwreq_mutex);
                self->aq_fw_ops->update_stats(self->aq_hw);
                mutex_unlock(&self->fwreq_mutex);
        }
        stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

        if (!stats)
                goto err_exit;

        data[i] = stats->uprc + stats->mprc + stats->bprc;
        data[++i] = stats->uprc;
        data[++i] = stats->mprc;
        data[++i] = stats->bprc;
        data[++i] = stats->erpt;
        data[++i] = stats->uptc + stats->mptc + stats->bptc;
        data[++i] = stats->uptc;
        data[++i] = stats->mptc;
        data[++i] = stats->bptc;
        data[++i] = stats->ubrc;
        data[++i] = stats->ubtc;
        data[++i] = stats->mbrc;
        data[++i] = stats->mbtc;
        data[++i] = stats->bbrc;
        data[++i] = stats->bbtc;
        data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
        data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
        data[++i] = stats->dma_pkt_rc;
        data[++i] = stats->dma_pkt_tc;
        data[++i] = stats->dma_oct_rc;
        data[++i] = stats->dma_oct_tc;
        data[++i] = stats->dpc;

        i++;

        data += i;

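        /* The firmware/MAC counters above are followed by per-vector software
         * ring statistics, appended one vector at a time.
         */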
        for (i = 0U, aq_vec = self->aq_vec[0];
                aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
                data += count;
                aq_vec_get_sw_stats(aq_vec, data, &count);
        }

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
        struct net_device *ndev = self->ndev;
        struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

        ndev->stats.rx_packets = stats->dma_pkt_rc;
        ndev->stats.rx_bytes = stats->dma_oct_rc;
        ndev->stats.rx_errors = stats->erpr;
        ndev->stats.rx_dropped = stats->dpc;
        ndev->stats.tx_packets = stats->dma_pkt_tc;
        ndev->stats.tx_bytes = stats->dma_oct_tc;
        ndev->stats.tx_errors = stats->erpt;
        ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                               struct ethtool_link_ksettings *cmd)
{
        if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
                cmd->base.port = PORT_FIBRE;
        else
                cmd->base.port = PORT_TP;
        /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

        ethtool_link_ksettings_zero_link_mode(cmd, supported);

        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     10000baseT_Full);

        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     5000baseT_Full);

        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     2500baseT_Full);

        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     1000baseT_Full);

        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     100baseT_Full);

        if (self->aq_nic_cfg.aq_hw_caps->flow_control)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     Pause);

        ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

        if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
                ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
        else
                ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

        ethtool_link_ksettings_zero_link_mode(cmd, advertising);

        if (self->aq_nic_cfg.is_autoneg)
                ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

        if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     10000baseT_Full);

        if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     5000baseT_Full);

        if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     2500baseT_Full);

        if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     1000baseT_Full);

        if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     100baseT_Full);

        if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Pause);

        /* Asym is when either RX or TX, but not both */
        if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
            !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Asym_Pause);

        if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
                ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
        else
                ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
                              const struct ethtool_link_ksettings *cmd)
{
        u32 speed = 0U;
        u32 rate = 0U;
        int err = 0;

        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
                self->aq_nic_cfg.is_autoneg = true;
        } else {
                speed = cmd->base.speed;

                switch (speed) {
                case SPEED_100:
                        rate = AQ_NIC_RATE_100M;
                        break;

                case SPEED_1000:
                        rate = AQ_NIC_RATE_1G;
                        break;

                case SPEED_2500:
                        rate = AQ_NIC_RATE_2GS;
                        break;

                case SPEED_5000:
                        rate = AQ_NIC_RATE_5G;
                        break;

                case SPEED_10000:
                        rate = AQ_NIC_RATE_10G;
                        break;

                default:
                        err = -1;
                        goto err_exit;
                        break;
                }
                if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
                        err = -1;
                        goto err_exit;
                }

                self->aq_nic_cfg.is_autoneg = false;
        }

        mutex_lock(&self->fwreq_mutex);
        err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
        mutex_unlock(&self->fwreq_mutex);
        if (err < 0)
                goto err_exit;

        self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
        return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
        return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
        u32 fw_version = 0U;

        self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

        return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;

        netif_tx_disable(self->ndev);
        netif_carrier_off(self->ndev);

        del_timer_sync(&self->service_timer);
        cancel_work_sync(&self->service_task);

        self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

        if (self->aq_nic_cfg.is_polling)
                del_timer_sync(&self->polling_timer);
        else
                aq_pci_func_free_irqs(self);

        for (i = 0U, aq_vec = self->aq_vec[0];
                self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
                aq_vec_stop(aq_vec);

        return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;

        if (!self)
                goto err_exit;

        for (i = 0U, aq_vec = self->aq_vec[0];
                self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
                aq_vec_deinit(aq_vec);

        if (likely(self->aq_fw_ops->deinit)) {
                mutex_lock(&self->fwreq_mutex);
                self->aq_fw_ops->deinit(self->aq_hw);
                mutex_unlock(&self->fwreq_mutex);
        }

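        /* Request a firmware power-state transition when leaving D0 or when
         * Wake-on-LAN is configured; the MAC address is passed so, presumably,
         * firmware can arm its WOL filter.
         */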
        if (self->power_state != AQ_HW_POWER_STATE_D0 ||
            self->aq_hw->aq_nic_cfg->wol)
                if (likely(self->aq_fw_ops->set_power)) {
                        mutex_lock(&self->fwreq_mutex);
                        self->aq_fw_ops->set_power(self->aq_hw,
                                                   self->power_state,
                                                   self->ndev->dev_addr);
                        mutex_unlock(&self->fwreq_mutex);
                }

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
        unsigned int i = 0U;

        if (!self)
                goto err_exit;

        for (i = ARRAY_SIZE(self->aq_vec); i--;) {
                if (self->aq_vec[i]) {
                        aq_vec_free(self->aq_vec[i]);
                        self->aq_vec[i] = NULL;
                }
        }

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
        int err = 0;

        if (!netif_running(self->ndev)) {
                err = 0;
                goto out;
        }
        rtnl_lock();
        if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
                self->power_state = AQ_HW_POWER_STATE_D3;
                netif_device_detach(self->ndev);
                netif_tx_stop_all_queues(self->ndev);

                err = aq_nic_stop(self);
                if (err < 0)
                        goto err_exit;

                aq_nic_deinit(self);
        } else {
                err = aq_nic_init(self);
                if (err < 0)
                        goto err_exit;

                err = aq_nic_start(self);
                if (err < 0)
                        goto err_exit;

                netif_device_attach(self->ndev);
                netif_tx_start_all_queues(self->ndev);
        }

err_exit:
        rtnl_unlock();
out:
        return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
        int err = 0;

        if (!self->ndev)
                return;

        rtnl_lock();

        netif_device_detach(self->ndev);

        if (netif_running(self->ndev)) {
                err = aq_nic_stop(self);
                if (err < 0)
                        goto err_exit;
        }
        aq_nic_deinit(self);

err_exit:
        rtnl_unlock();
}