net: ethernet: aquantia: Support for NIC-specific code
[linux-block.git] / drivers / net / ethernet / aquantia / atlantic / aq_nic.c
CommitLineData
97bde5c4
DV
1/*
2 * aQuantia Corporation Network Driver
3 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
9
10/* File aq_nic.c: Definition of common code for NIC. */
11
12#include "aq_nic.h"
13#include "aq_ring.h"
14#include "aq_vec.h"
15#include "aq_hw.h"
16#include "aq_pci_func.h"
17#include "aq_nic_internal.h"
18
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/timer.h>
22#include <linux/cpu.h>
23#include <linux/ip.h>
24#include <linux/tcp.h>
25#include <net/ip.h>
26
27static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
28{
29 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
30 struct aq_rss_parameters *rss_params = &cfg->aq_rss;
31 int i = 0;
32
33 static u8 rss_key[40] = {
34 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
35 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
36 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
37 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
38 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
39 };
40
41 rss_params->hash_secret_key_size = sizeof(rss_key);
42 memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
43 rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
44
45 for (i = rss_params->indirection_table_size; i--;)
46 rss_params->indirection_table[i] = i & (num_rss_queues - 1);
47}
48
49/* Fills aq_nic_cfg with valid defaults */
50static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
51{
52 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
53
54 cfg->aq_hw_caps = &self->aq_hw_caps;
55
56 cfg->vecs = AQ_CFG_VECS_DEF;
57 cfg->tcs = AQ_CFG_TCS_DEF;
58
59 cfg->rxds = AQ_CFG_RXDS_DEF;
60 cfg->txds = AQ_CFG_TXDS_DEF;
61
62 cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
63
64 cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
65 cfg->itr = cfg->is_interrupt_moderation ?
66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
67
68 cfg->is_rss = AQ_CFG_IS_RSS_DEF;
69 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
70 cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
71 cfg->flow_control = AQ_CFG_FC_MODE;
72
73 cfg->mtu = AQ_CFG_MTU_DEF;
74 cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
75 cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
76
77 cfg->is_lro = AQ_CFG_IS_LRO_DEF;
78
79 cfg->vlan_id = 0U;
80
81 aq_nic_rss_init(self, cfg->num_rss_queues);
82}
83
84/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
85int aq_nic_cfg_start(struct aq_nic_s *self)
86{
87 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
88
89 /*descriptors */
90 cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
91 cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
92
93 /*rss rings */
94 cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
95 cfg->vecs = min(cfg->vecs, num_online_cpus());
96 /* cfg->vecs should be power of 2 for RSS */
97 if (cfg->vecs >= 8U)
98 cfg->vecs = 8U;
99 else if (cfg->vecs >= 4U)
100 cfg->vecs = 4U;
101 else if (cfg->vecs >= 2U)
102 cfg->vecs = 2U;
103 else
104 cfg->vecs = 1U;
105
106 cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
107
108 if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
109 (self->aq_hw_caps.vecs == 1U) ||
110 (cfg->vecs == 1U)) {
111 cfg->is_rss = 0U;
112 cfg->vecs = 1U;
113 }
114
115 cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
116 cfg->hw_features = self->aq_hw_caps.hw_features;
117 return 0;
118}
119
/* Periodic service task: refreshes link state, re-programs interrupt
 * moderation and aggregates per-ring stats into the netdev counters.
 * Runs from timer (softirq) context and always re-arms itself.
 */
static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct net_device *ndev = aq_nic_get_ndev(self);
	int err = 0;
	bool is_busy = false;
	unsigned int i = 0U;
	struct aq_hw_link_status_s link_status;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	/* mark the nic busy for the duration of this callback */
	atomic_inc(&self->header.busy_count);
	is_busy = true;
	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
	if (err < 0)
		goto err_exit;

	/* return value deliberately ignored: moderation refresh is
	 * best-effort here
	 */
	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
		    self->aq_nic_cfg.is_interrupt_moderation);

	/* only touch carrier state when the HW-reported status changed */
	if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
		if (link_status.mbps) {
			aq_utils_obj_set(&self->header.flags,
					 AQ_NIC_FLAG_STARTED);
			aq_utils_obj_clear(&self->header.flags,
					   AQ_NIC_LINK_DOWN);
			netif_carrier_on(self->ndev);
		} else {
			netif_carrier_off(self->ndev);
			aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
		}

		self->link_status = link_status;
	}

	/* sum RX/TX stats over every allocated vector */
	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i])
			aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
	}

	ndev->stats.rx_packets = stats_rx.packets;
	ndev->stats.rx_bytes = stats_rx.bytes;
	ndev->stats.rx_errors = stats_rx.errors;
	ndev->stats.tx_packets = stats_tx.packets;
	ndev->stats.tx_bytes = stats_tx.bytes;
	ndev->stats.tx_errors = stats_tx.errors;

err_exit:
	if (is_busy)
		atomic_dec(&self->header.busy_count);
	/* re-arm: the service task runs for the lifetime of the nic */
	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}
178
179static void aq_nic_polling_timer_cb(unsigned long param)
180{
181 struct aq_nic_s *self = (struct aq_nic_s *)param;
182 struct aq_vec_s *aq_vec = NULL;
183 unsigned int i = 0U;
184
185 for (i = 0U, aq_vec = self->aq_vec[0];
186 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
187 aq_vec_isr(i, (void *)aq_vec);
188
189 mod_timer(&self->polling_timer, jiffies +
190 AQ_CFG_POLLING_TIMER_INTERVAL);
191}
192
/* Allocates an Ethernet net_device with private space for struct
 * aq_nic_s and AQ_CFG_VECS_MAX TX subqueues. Returns NULL on failure.
 */
static struct net_device *aq_nic_ndev_alloc(void)
{
	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}
197
198struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
199 const struct ethtool_ops *et_ops,
200 struct device *dev,
201 struct aq_pci_func_s *aq_pci_func,
202 unsigned int port,
203 const struct aq_hw_ops *aq_hw_ops)
204{
205 struct net_device *ndev = NULL;
206 struct aq_nic_s *self = NULL;
207 int err = 0;
208
209 ndev = aq_nic_ndev_alloc();
210 self = netdev_priv(ndev);
211 if (!self) {
212 err = -EINVAL;
213 goto err_exit;
214 }
215
216 ndev->netdev_ops = ndev_ops;
217 ndev->ethtool_ops = et_ops;
218
219 SET_NETDEV_DEV(ndev, dev);
220
221 ndev->if_port = port;
222 self->ndev = ndev;
223
224 self->aq_pci_func = aq_pci_func;
225
226 self->aq_hw_ops = *aq_hw_ops;
227 self->port = (u8)port;
228
229 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
230 &self->aq_hw_ops);
231 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
232 if (err < 0)
233 goto err_exit;
234
235 aq_nic_cfg_init_defaults(self);
236
237err_exit:
238 if (err < 0) {
239 aq_nic_free_hot_resources(self);
240 self = NULL;
241 }
242 return self;
243}
244
245int aq_nic_ndev_register(struct aq_nic_s *self)
246{
247 int err = 0;
248 unsigned int i = 0U;
249
250 if (!self->ndev) {
251 err = -EINVAL;
252 goto err_exit;
253 }
254 err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
255 self->aq_nic_cfg.aq_hw_caps,
256 self->ndev->dev_addr);
257 if (err < 0)
258 goto err_exit;
259
260#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
261 {
262 static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
263
264 ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
265 }
266#endif
267 err = register_netdev(self->ndev);
268 if (err < 0)
269 goto err_exit;
270
271 self->is_ndev_registered = true;
272 netif_carrier_off(self->ndev);
273
274 for (i = AQ_CFG_VECS_MAX; i--;)
275 aq_nic_ndev_queue_stop(self, i);
276
277err_exit:
278 return err;
279}
280
281int aq_nic_ndev_init(struct aq_nic_s *self)
282{
283 struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
284 struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
285
286 self->ndev->hw_features |= aq_hw_caps->hw_features;
287 self->ndev->features = aq_hw_caps->hw_features;
288 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
289 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
290
291 return 0;
292}
293
294void aq_nic_ndev_free(struct aq_nic_s *self)
295{
296 if (!self->ndev)
297 goto err_exit;
298
299 if (self->is_ndev_registered)
300 unregister_netdev(self->ndev);
301
302 if (self->aq_hw)
303 self->aq_hw_ops.destroy(self->aq_hw);
304
305 free_netdev(self->ndev);
306
307err_exit:;
308}
309
310struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
311{
312 struct aq_nic_s *self = NULL;
313 int err = 0;
314
315 if (!ndev) {
316 err = -EINVAL;
317 goto err_exit;
318 }
319 self = netdev_priv(ndev);
320
321 if (!self) {
322 err = -EINVAL;
323 goto err_exit;
324 }
325 if (netif_running(ndev)) {
326 unsigned int i;
327
328 for (i = AQ_CFG_VECS_MAX; i--;)
329 netif_stop_subqueue(ndev, i);
330 }
331
332 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
333 self->aq_vecs++) {
334 self->aq_vec[self->aq_vecs] =
335 aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
336 if (!self->aq_vec[self->aq_vecs]) {
337 err = -ENOMEM;
338 goto err_exit;
339 }
340 }
341
342err_exit:
343 if (err < 0) {
344 aq_nic_free_hot_resources(self);
345 self = NULL;
346 }
347 return self;
348}
349
/* Publishes the TX ring at slot @idx so aq_nic_xmit() can look it up
 * via AQ_NIC_TCVEC2RING().
 */
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}
355
/* Returns the parent device of the net_device — the one passed to
 * SET_NETDEV_DEV() in aq_nic_alloc_cold(); used as the DMA device.
 */
struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
	return self->ndev->dev.parent;
}
360
/* Accessor for the net_device backing this NIC instance. */
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}
365
366int aq_nic_init(struct aq_nic_s *self)
367{
368 struct aq_vec_s *aq_vec = NULL;
369 int err = 0;
370 unsigned int i = 0U;
371
372 self->power_state = AQ_HW_POWER_STATE_D0;
373 err = self->aq_hw_ops.hw_reset(self->aq_hw);
374 if (err < 0)
375 goto err_exit;
376
377 err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
378 aq_nic_get_ndev(self)->dev_addr);
379 if (err < 0)
380 goto err_exit;
381
382 for (i = 0U, aq_vec = self->aq_vec[0];
383 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
384 aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
385
386err_exit:
387 return err;
388}
389
/* Thin wrapper: wakes the TX subqueue @idx of our net_device. */
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}
394
/* Thin wrapper: stops the TX subqueue @idx of our net_device. */
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}
399
/* Brings the NIC up: restores filters, starts vectors and hardware,
 * arms the service timer, then either enters polling mode or requests
 * per-vector IRQs, and finally wakes the queues.
 * Returns 0 on success or a negative errno.
 */
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	/* re-program the multicast list and packet filter kept in SW */
	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
						   self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops.hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			    self->aq_nic_cfg.is_interrupt_moderation);
	if (err < 0)
		goto err_exit;

	/* service timer: link supervision + stats aggregation */
	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
		    (unsigned long)self);
	mod_timer(&self->service_timer, jiffies +
		  AQ_CFG_SERVICE_TIMER_INTERVAL);

	if (self->aq_nic_cfg.is_polling) {
		/* no IRQs: a timer periodically drives the vector ISRs */
		setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
			    (unsigned long)self);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		/* one IRQ per vector, pinned via its affinity mask */
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
						    self->ndev->name, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
						    AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	/* advertise the real queue count to the stack */
	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}
473
/* Maps an skb (linear head + page frags) into consecutive TX buffer
 * descriptors starting at @dx. Returns the number of descriptors used.
 *
 * NOTE(review): neither dma_map_single() nor skb_frag_dma_map() results
 * are checked with dma_mapping_error() — a failed mapping would be
 * handed to hardware as-is. TODO: add mapping-error unwind.
 */
static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
					struct sk_buff *skb,
					struct aq_ring_buff_s *dx)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;

	/* first descriptor: the linear part of the skb (start-of-packet) */
	dx->flags = 0U;
	dx->len = skb_headlen(skb);
	dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
				DMA_TO_DEVICE);
	dx->len_pkt = skb->len;
	dx->is_sop = 1U;
	dx->is_mapped = 1U;

	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum-offload flags; the L4 protocol is read from the
		 * IPv4 header. NOTE(review): IPv6 is not considered here —
		 * confirm how v6 traffic reaches this path.
		 */
		dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
		dx->is_tcp_cso =
			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
		dx->is_udp_cso =
			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		/* frags larger than the HW frame limit are split across
		 * several descriptors sharing a single DMA mapping
		 */
		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			++dx;
			++ret;
			dx->flags = 0U;
			dx->len = AQ_CFG_TX_FRAME_MAX;
			dx->pa = frag_pa;
			dx->is_mapped = 1U;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
		}

		++dx;
		++ret;

		dx->flags = 0U;
		dx->len = frag_len;
		dx->pa = frag_pa;
		dx->is_mapped = 1U;
	}

	/* last descriptor carries end-of-packet and owns the skb */
	dx->is_eop = 1U;
	dx->skb = skb;

	return ret;
}
536
537static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
538 struct sk_buff *skb,
539 struct aq_ring_buff_s *dx)
540{
541 dx->flags = 0U;
542 dx->len_pkt = skb->len;
543 dx->len_l2 = ETH_HLEN;
544 dx->len_l3 = ip_hdrlen(skb);
545 dx->len_l4 = tcp_hdrlen(skb);
546 dx->mss = skb_shinfo(skb)->gso_size;
547 dx->is_txc = 1U;
548 return 1U;
549}
550
551static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
552 struct aq_ring_buff_s *dx)
553{
554 unsigned int ret = 0U;
555
556 if (unlikely(skb_is_gso(skb))) {
557 ret = aq_nic_map_skb_lso(self, skb, dx);
558 ++dx;
559 }
560
561 ret += aq_nic_map_skb_frag(self, skb, dx);
562
563 return ret;
564}
565
/* Transmit entry point: maps @skb into ring descriptors and kicks the
 * hardware. Returns >= 0 on success or NETDEV_TX_BUSY when the ring is
 * short of space or its lock could not be taken within
 * AQ_CFG_LOCK_TRYS attempts.
 * Note: an skb with more frags than AQ_CFG_SKB_FRAGS_MAX is dropped
 * (freed) and 0 is returned, i.e. reported to the stack as consumed.
 */
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
__releases(&ring->lock)
__acquires(&ring->lock)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	unsigned int trys = AQ_CFG_LOCK_TRYS;
	int err = 0;
	bool is_nic_in_bad_state;
	bool is_busy = false;
	struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];

	/* worst case: one descriptor per frag plus the linear head */
	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	atomic_inc(&self->header.busy_count);
	is_busy = true;

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		/* cannot fit into the on-stack buffers[] array — drop */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	/* not TX-ready, or too little ring space left for a full frame */
	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
						(aq_ring_avail_dx(ring) <
						AQ_CFG_SKB_FRAGS_MAX);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* spin on trylock up to AQ_CFG_LOCK_TRYS times rather than block */
	do {
		if (spin_trylock(&ring->header.lock)) {
			frags = aq_nic_map_skb(self, skb, &buffers[0]);

			aq_ring_tx_append_buffs(ring, &buffers[0], frags);

			err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
							      ring, frags);
			if (err >= 0) {
				/* stop the queue before the ring can
				 * overflow on the next worst-case frame
				 */
				if (aq_ring_avail_dx(ring) <
				    AQ_CFG_SKB_FRAGS_MAX + 1)
					aq_nic_ndev_queue_stop(self, ring->idx);
			}
			spin_unlock(&ring->header.lock);

			if (err >= 0) {
				++ring->stats.tx.packets;
				ring->stats.tx.bytes += skb->len;
			}
			break;
		}
	} while (--trys);

	if (!trys) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

err_exit:
	if (is_busy)
		atomic_dec(&self->header.busy_count);
	return err;
}
636
637int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
638{
639 int err = 0;
640
641 err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
642 if (err < 0)
643 goto err_exit;
644
645 self->packet_filter = flags;
646
647err_exit:
648 return err;
649}
650
651int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
652{
653 struct netdev_hw_addr *ha = NULL;
654 unsigned int i = 0U;
655
656 self->mc_list.count = 0U;
657
658 netdev_for_each_mc_addr(ha, ndev) {
659 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
660 ++self->mc_list.count;
661 }
662
663 return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
664 self->mc_list.ar,
665 self->mc_list.count);
666}
667
/* Validates and stores a new MTU in the NIC configuration.
 * Returns 0 on success or -EINVAL when the request exceeds the
 * hardware maximum.
 * NOTE(review): new_mtu is compared directly against aq_hw_caps.mtu —
 * presumably caps.mtu already accounts for the Ethernet header
 * overhead; confirm against the caps definition.
 */
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
}
681
/* Pushes @ndev's current dev_addr into hardware. Returns HW status. */
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}
686
/* Returns the link speed in Mbps last cached by the service timer
 * (0 while the link is down).
 */
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}
691
692int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
693{
694 u32 *regs_buff = p;
695 int err = 0;
696
697 regs->version = 1;
698
699 err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
700 &self->aq_hw_caps, regs_buff);
701 if (err < 0)
702 goto err_exit;
703
704err_exit:
705 return err;
706}
707
/* Number of u32 registers hw_get_regs() will dump (for ethtool). */
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_hw_caps.mac_regs_count;
}
712
/* Fills @data with the HW stats section followed by one SW-stats
 * section per vector. The caller must size the buffer to match —
 * NOTE(review): confirm the layout against the ethtool strings table.
 */
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	unsigned int count = 0U;
	int err = 0;

	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
	if (err < 0)
		goto err_exit;

	/* step past the HW section; count is reset so the first loop
	 * iteration does not advance again
	 */
	data += count;
	count = 0U;

	/* inside the loop "data += count" skips the previous vector's
	 * output before the next vector writes its own
	 */
	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
	(void)err;
}
736
737void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd)
738{
739 cmd->port = PORT_TP;
740 cmd->transceiver = XCVR_EXTERNAL;
741 /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
742 cmd->duplex = DUPLEX_FULL;
743 cmd->autoneg = self->aq_nic_cfg.is_autoneg;
744
745 cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ?
746 ADVERTISED_10000baseT_Full : 0U;
747 cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ?
748 ADVERTISED_1000baseT_Full : 0U;
749 cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ?
750 ADVERTISED_100baseT_Full : 0U;
751 cmd->supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0;
752 cmd->supported |= SUPPORTED_Autoneg;
753
754 cmd->advertising = (self->aq_nic_cfg.is_autoneg) ?
755 ADVERTISED_Autoneg : 0U;
756 cmd->advertising |=
757 (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ?
758 ADVERTISED_10000baseT_Full : 0U;
759 cmd->advertising |=
760 (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ?
761 ADVERTISED_1000baseT_Full : 0U;
762
763 cmd->advertising |=
764 (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ?
765 ADVERTISED_100baseT_Full : 0U;
766 cmd->advertising |= (self->aq_nic_cfg.flow_control) ?
767 ADVERTISED_Pause : 0U;
768}
769
770int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd)
771{
772 u32 speed = 0U;
773 u32 rate = 0U;
774 int err = 0;
775
776 if (cmd->autoneg == AUTONEG_ENABLE) {
777 rate = self->aq_hw_caps.link_speed_msk;
778 self->aq_nic_cfg.is_autoneg = true;
779 } else {
780 speed = ethtool_cmd_speed(cmd);
781
782 switch (speed) {
783 case SPEED_100:
784 rate = AQ_NIC_RATE_100M;
785 break;
786
787 case SPEED_1000:
788 rate = AQ_NIC_RATE_1G;
789 break;
790
791 case SPEED_2500:
792 rate = AQ_NIC_RATE_2GS;
793 break;
794
795 case SPEED_5000:
796 rate = AQ_NIC_RATE_5G;
797 break;
798
799 case SPEED_10000:
800 rate = AQ_NIC_RATE_10G;
801 break;
802
803 default:
804 err = -1;
805 goto err_exit;
806 break;
807 }
808 if (!(self->aq_hw_caps.link_speed_msk & rate)) {
809 err = -1;
810 goto err_exit;
811 }
812
813 self->aq_nic_cfg.is_autoneg = false;
814 }
815
816 err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
817 if (err < 0)
818 goto err_exit;
819
820 self->aq_nic_cfg.link_speed_msk = rate;
821
822err_exit:
823 return err;
824}
825
/* Accessor for the NIC's runtime configuration block. */
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}
830
/* Queries the firmware version from the hardware layer.
 * The hook's return code is ignored; fw_version stays 0 if the hook
 * does not write it.
 */
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}
839
/* Quiesces the NIC: stops the TX queues, kills the timers, disables
 * IRQs (or frees them, depending on polling mode), stops every vector
 * and finally the hardware. Returns hw_stop()'s status.
 */
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);

	/* synchronous: the service callback cannot be running afterwards */
	del_timer_sync(&self->service_timer);

	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self->aq_pci_func);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops.hw_stop(self->aq_hw);
}
864
865void aq_nic_deinit(struct aq_nic_s *self)
866{
867 struct aq_vec_s *aq_vec = NULL;
868 unsigned int i = 0U;
869
870 if (!self)
871 goto err_exit;
872
873 for (i = 0U, aq_vec = self->aq_vec[0];
874 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
875 aq_vec_deinit(aq_vec);
876
877 if (self->power_state == AQ_HW_POWER_STATE_D0) {
878 (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
879 } else {
880 (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
881 self->power_state);
882 }
883
884err_exit:;
885}
886
887void aq_nic_free_hot_resources(struct aq_nic_s *self)
888{
889 unsigned int i = 0U;
890
891 if (!self)
892 goto err_exit;
893
894 for (i = AQ_DIMOF(self->aq_vec); i--;) {
895 if (self->aq_vec[i])
896 aq_vec_free(self->aq_vec[i]);
897 }
898
899err_exit:;
900}
901
902int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
903{
904 int err = 0;
905
906 if (!netif_running(self->ndev)) {
907 err = 0;
908 goto err_exit;
909 }
910 rtnl_lock();
911 if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
912 self->power_state = AQ_HW_POWER_STATE_D3;
913 netif_device_detach(self->ndev);
914 netif_tx_stop_all_queues(self->ndev);
915
916 err = aq_nic_stop(self);
917 if (err < 0)
918 goto err_exit;
919
920 aq_nic_deinit(self);
921 } else {
922 err = aq_nic_init(self);
923 if (err < 0)
924 goto err_exit;
925
926 err = aq_nic_start(self);
927 if (err < 0)
928 goto err_exit;
929
930 netif_device_attach(self->ndev);
931 netif_tx_start_all_queues(self->ndev);
932 }
933 rtnl_unlock();
934
935err_exit:
936 return err;
937}