net: aquantia: Change confusing no_ff_addr to more meaningful name
[linux-2.6-block.git] / drivers / net / ethernet / aquantia / atlantic / hw_atl / hw_atl_b0.c
CommitLineData
bab6de8f
DV
1/*
2 * aQuantia Corporation Network Driver
3 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
9
10/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
11
12#include "../aq_hw.h"
13#include "../aq_hw_utils.h"
14#include "../aq_ring.h"
db550615 15#include "../aq_nic.h"
bab6de8f
DV
16#include "hw_atl_b0.h"
17#include "hw_atl_utils.h"
18#include "hw_atl_llh.h"
19#include "hw_atl_b0_internal.h"
1e366161 20#include "hw_atl_llh_internal.h"
bab6de8f 21
4948293f
IR
/* Baseline capabilities shared by every Atlantic B0 board.  Each
 * board-specific aq_hw_caps_s below starts from this list and then
 * overrides only the media type and the supported link-speed mask.
 */
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true, \
	.msix_irqs = 4U, \
	.irq_mask = ~0U, \
	.vecs = HW_ATL_B0_RSS_MAX, \
	.tcs = HW_ATL_B0_TC_MAX, \
	.rxd_alignment = 1U, \
	.rxd_size = HW_ATL_B0_RXD_SIZE, \
	.rxds = 4U * 1024U, \
	.txd_alignment = 1U, \
	.txd_size = HW_ATL_B0_TXD_SIZE, \
	.txds = 8U * 1024U, \
	.txhwb_alignment = 4096U, \
	.tx_rings = HW_ATL_B0_TX_RINGS, \
	.rx_rings = HW_ATL_B0_RX_RINGS, \
	.hw_features = NETIF_F_HW_CSUM | \
			NETIF_F_RXCSUM | \
			NETIF_F_RXHASH | \
			NETIF_F_SG | \
			NETIF_F_TSO | \
			NETIF_F_LRO, \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true, \
	.mtu = HW_ATL_B0_MTU_JUMBO, \
	.mac_regs_count = 88, \
	.hw_alive_check_addr = 0x10U
4948293f
IR
48
/* AQC100: fibre PHY, full 100M..10G speed range */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = HW_ATL_B0_RATE_10G |
			  HW_ATL_B0_RATE_5G |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G |
			  HW_ATL_B0_RATE_100M,
};

/* AQC107: twisted-pair PHY, full 100M..10G speed range */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_10G |
			  HW_ATL_B0_RATE_5G |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G |
			  HW_ATL_B0_RATE_100M,
};

/* AQC108: twisted-pair PHY, capped at 5G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_5G |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G |
			  HW_ATL_B0_RATE_100M,
};

/* AQC109: twisted-pair PHY, capped at 2.5G */
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G |
			  HW_ATL_B0_RATE_100M,
};
bab6de8f 85
bab6de8f
DV
/* Perform a global soft reset of the B0 NIC and put the firmware
 * mailbox (MPI) back into its reset state.
 *
 * The register sequence below follows the vendor reset procedure and
 * is order-sensitive; do not reorder the calls.
 * NOTE: AQ_HW_WAIT_FOR() is a macro that assigns the local "err" on
 * poll timeout, which is why "err" is tested right after each use.
 *
 * Return: 0 on success, negative errno on reset/poll failure.
 */
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	/* Register-domain reset controls per the vendor reset sequence */
	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);

	HW_ATL_FLUSH();
	hw_atl_glb_soft_res_set(self, 1);

	/* check 10 times by 1ms: wait for the soft-reset bit to clear */
	AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	/* Reset the interrupt block as well */
	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
	hw_atl_itr_res_irq_set(self, 1U);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
118
/* Configure QoS: Tx packet scheduler arbitration/credits, Tx/Rx packet
 * buffer sizes with flow-control watermarks, and the 802.1p priority
 * to traffic-class mapping.  Only TC 0 is programmed here.
 *
 * Register values (rate limit 0xA, credits 0xFFF/0x64/0x50/0x1E) come
 * from the vendor initialization sequence.
 */
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;
	bool is_rx_flow_control = false;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	/* Tx buf size; hi/lo watermarks at 66%/50% of the buffer,
	 * expressed in 32-byte units (buff_size is in KiB).
	 */
	buff_size = HW_ATL_B0_TXBUF_MAX;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 50U) /
						   100U, tc);

	/* QoS Rx buf size per TC; XOFF generation follows the user's
	 * Rx flow-control setting.
	 */
	tc = 0;
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
	buff_size = HW_ATL_B0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping: every priority maps to TC 0 */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}
177
/* Load the RSS hash secret key into the receive packet filter block.
 *
 * The key is written one 32-bit word at a time through an indirect
 * register interface (data, address, write-enable), polling each write
 * for completion.  The key is walked from the last word down and
 * byte-swapped to match the hardware's expected byte order; a zero key
 * is programmed when RSS is disabled.
 *
 * The literal 10 is the key length in 32-bit words (presumably a
 * 40-byte key -- TODO confirm against struct aq_rss_parameters).
 * NOTE: AQ_HW_WAIT_FOR() assigns the local "err" on poll timeout.
 */
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;
	unsigned int i = 0U;
	unsigned int addr = 0U;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
203
204static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
205 struct aq_rss_parameters *rss_params)
206{
207 u8 *indirection_table = rss_params->indirection_table;
208 u32 i = 0U;
209 u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
210 int err = 0;
211 u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
212 HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
213
214 memset(bitary, 0, sizeof(bitary));
215
216 for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
217 (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
218 ((indirection_table[i] % num_rss_queues) <<
219 ((i * 3U) & 0xFU));
220 }
221
08b5cf08 222 for (i = ARRAY_SIZE(bitary); i--;) {
8e1c072f
IR
223 hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
224 hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
225 hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
226 AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
227 1000U, 10U);
bab6de8f
DV
228 if (err < 0)
229 goto err_exit;
230 }
231
232 err = aq_hw_err_from_flags(self);
233
234err_exit:
235 return err;
236}
237
/* Enable hardware offloads according to the NIC configuration:
 * Tx/Rx IPv4-header and TCP/UDP checksum offload, LSO on all rings,
 * and LRO session parameters (LRO itself gated on aq_nic_cfg->is_lro).
 */
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	unsigned int i;

	/* TX checksums offloads*/
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads*/
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads*/
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* LRO offloads */
	{
		/* Encode max descriptors per LRO session:
		 * 0x3 = 8, 0x2 = 4, 0x1 = 2, 0x0 = none, bounded by
		 * HW_ATL_B0_LRO_RXD_MAX.
		 */
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

		/* LRO timer base and coalescing intervals (vendor values) */
		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		/* Per-ring enable mask: all rings on, or all off */
		hw_atl_rpo_lro_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
	}
	return aq_hw_err_from_flags(self);
}
282
/* One-time Tx datapath init: LSO per-segment TCP flag masks, Tx
 * write-back interrupts, a chip-feature-dependent misc register, and
 * DCA disabled.
 */
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
	/* Per-segment TCP flag masks applied by LSO when replicating
	 * headers (first/middle vs last segment; values from the
	 * vendor init sequence).
	 */
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts: fire on descriptor write-back */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc: undocumented register, value depends on TPO2 feature */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}
302
/* One-time Rx datapath init: traffic-class mode, Rx flow control, RSS
 * ring selection, L2 unicast/multicast filters, VLAN filtering, Rx
 * write-back interrupts, and DCA disabled.
 */
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					0xB3333333U : 0x00000000U);

	/* Multicast filters: only unicast filter 0 enabled initially */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters: standard outer (802.1ad) and inner (802.1Q)
	 * ethertypes.
	 */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	if (cfg->vlan_id) {
		hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
		hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
		hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);

		hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
		hw_atl_rpf_vlan_untagged_act_set(self, 1U);

		/* NOTE(review): the configured vlan_id is written into
		 * filter 0 while filter 1 is the one enabled -- looks
		 * like the id_flr_set index should be 1U; confirm
		 * against vendor code before changing.
		 */
		hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
		hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
		hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
	} else {
		hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
	}

	/* Rx Interrupts: fire on descriptor write-back */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc: undocumented register, value depends on RPF2 feature */
	aq_hw_write_reg(self, 0x00005040U,
			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
361
362static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
363{
364 int err = 0;
365 unsigned int h = 0U;
366 unsigned int l = 0U;
367
368 if (!mac_addr) {
369 err = -EINVAL;
370 goto err_exit;
371 }
372 h = (mac_addr[0] << 8) | (mac_addr[1]);
373 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
374 (mac_addr[4] << 8) | mac_addr[5];
375
8e1c072f
IR
376 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
377 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
378 hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
379 hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
bab6de8f
DV
380
381 err = aq_hw_err_from_flags(self);
382
383err_exit:
384 return err;
385}
386
/* Full hardware bring-up: datapaths, MAC address, firmware link mask,
 * QoS, RSS, PCIe request limits, statistics baseline, and interrupt
 * routing.  The call order follows the vendor init sequence.
 *
 * @mac_addr: 6-byte station address to program.
 * Return: 0 on success, negative errno from the hardware error flags.
 */
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	/* Global interrupt control words indexed by [irq_type][multi-vector] */
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
	};

	int err = 0;
	u32 val;

	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	/* Tell firmware which link speeds to negotiate */
	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable to
	 * handle more than (8K-MRRS) incoming DMA data.
	 * Value 24 in 256byte units
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	hw_atl_utils_update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts: global control word per IRQ type / vector count */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						  1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts: route the error interrupt */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
			((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}
449
450static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
451 struct aq_ring_s *ring)
452{
8e1c072f 453 hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
bab6de8f
DV
454 return aq_hw_err_from_flags(self);
455}
456
457static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
458 struct aq_ring_s *ring)
459{
8e1c072f 460 hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
bab6de8f
DV
461 return aq_hw_err_from_flags(self);
462}
463
/* Open the datapath: enable the Tx and Rx packet buffers. */
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}
470
471static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
472 struct aq_ring_s *ring)
473{
8e1c072f 474 hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
bab6de8f
DV
475 return 0;
476}
477
/* Build "frags" hardware Tx descriptors from the ring's buffer entries
 * starting at sw_tail, then kick the tail pointer.
 *
 * A buffer marked is_txc becomes a context (TXC) descriptor carrying
 * LSO parameters (MSS, L2/L3/L4 header lengths); the following data
 * (TXD) descriptors of that packet then run with LSO enabled until the
 * end-of-packet buffer, which also requests descriptor write-back.
 */
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int pkt_len = 0U;
	unsigned int frag_count = 0U;
	bool is_gso = false;	/* carries LSO state across descriptors */

	buff = &ring->buff_ring[ring->sw_tail];
	/* Single-fragment packet: len is the whole packet length */
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_B0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_txc) {
			/* Context descriptor: pack LSO header geometry */
			txd->ctl |= (buff->len_l3 << 31) |
				(buff->len_l2 << 24) |
				HW_ATL_B0_TXD_CTL_CMD_TCP |
				HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				(buff->len_l4 << 8) |
				(buff->len_l3 >> 1);

			/* PAY_LEN excludes the replicated headers */
			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;

			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
		} else {
			/* Data descriptor: one DMA fragment */
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
						((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
				/* LSO context ends with the packet */
				is_gso = false;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_b0_hw_tx_ring_tail_update(self, ring);
	return aq_hw_err_from_flags(self);
}
552
/* Configure one Rx ring: disabled during setup, descriptor base/len,
 * buffer sizes (no header split, no VLAN stripping), interrupt vector
 * mapping, and DCA off.
 */
static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	/* Keep the ring disabled while it is being reconfigured */
	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw, aq_ring->idx);

	/* Ring length register is in units of 8 descriptors */
	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	/* Data buffer size is programmed in KiB */
	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
593
/* Configure one Tx ring: descriptor base/len, tail reset, write-back
 * threshold, interrupt vector mapping, and DCA off.
 */
static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);

	/* Ring length register is in units of 8 descriptors */
	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
623
624static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
625 struct aq_ring_s *ring,
626 unsigned int sw_tail_old)
627{
628 for (; sw_tail_old != ring->sw_tail;
629 sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
630 struct hw_atl_rxd_s *rxd =
631 (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
632 HW_ATL_B0_RXD_SIZE];
633
634 struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
635
636 rxd->buf_addr = buff->pa;
637 rxd->hdr_addr = 0U;
638 }
639
8e1c072f 640 hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
bab6de8f
DV
641
642 return aq_hw_err_from_flags(self);
643}
644
645static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
646 struct aq_ring_s *ring)
647{
648 int err = 0;
8e1c072f 649 unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
bab6de8f 650
78f5193d 651 if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
bab6de8f
DV
652 err = -ENXIO;
653 goto err_exit;
654 }
655 ring->hw_head = hw_head_;
656 err = aq_hw_err_from_flags(self);
657
658err_exit:
659 return err;
660}
661
/* Parse completed Rx descriptor write-backs from hw_head up to
 * sw_tail, filling in the corresponding aq_ring_buff_s entries
 * (length, checksum results, RSS hash, error/EOP/continuation state)
 * and unmapping each buffer for CPU access.
 *
 * Bit masks on rxd_wb->status / ->type follow the B0 write-back
 * descriptor layout (see hw_atl_b0_internal.h).
 */
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	struct device *ndev = aq_nic_get_dev(ring->aq_nic);

	for (; ring->hw_head != ring->sw_tail;
	     ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
			break;
		}

		buff = &ring->buff_ring[ring->hw_head];

		/* Error bits live in status[5:2] */
		is_err = (0x0000003CU & rxd_wb->status);

		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		is_err &= ~0x20U; /* exclude validity bit */

		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			/* pkt_type[1:0] == 0 -> IPv4 header present */
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;

			/* pkt_type[4:2]: 0x4 -> UDP, 0x0 -> TCP */
			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;
			}
		}

		/* Checksum error bits were consumed above */
		is_err &= ~0x18U;

		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */
			buff->is_error = 1U;
		} else {
			if (self->aq_nic_cfg->is_rss) {
				/* last 4 byte */
				u16 rss_type = rxd_wb->type & 0xFU;

				if (rss_type && rss_type < 0x8U) {
					buff->is_hash_l4 = (rss_type == 0x4 ||
					rss_type == 0x5);
					buff->rss_hash = rxd_wb->rss_hash;
				}
			}

			if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
				/* pkt_len is the running total; the
				 * last fragment's length is the
				 * remainder modulo the buffer size.
				 */
				buff->len = rxd_wb->pkt_len %
					AQ_CFG_RX_FRAME_MAX;
				buff->len = buff->len ?
					buff->len : AQ_CFG_RX_FRAME_MAX;
				buff->next = 0U;
				buff->is_eop = 1U;
			} else {
				if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
					rxd_wb->status) {
					/* LRO */
					buff->next = rxd_wb->next_desc_ptr;
					++ring->stats.rx.lro_packets;
				} else {
					/* jumbo */
					buff->next =
						aq_ring_next_dx(ring,
								ring->hw_head);
					++ring->stats.rx.jumbo_packets;
				}
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
751
752static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
753{
8e1c072f 754 hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
bab6de8f
DV
755 return aq_hw_err_from_flags(self);
756}
757
758static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
759{
8e1c072f
IR
760 hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
761 hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
bab6de8f 762
1a713f87 763 atomic_inc(&self->dpc);
bab6de8f
DV
764 return aq_hw_err_from_flags(self);
765}
766
767static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
768{
8e1c072f 769 *mask = hw_atl_itr_irq_statuslsw_get(self);
bab6de8f
DV
770 return aq_hw_err_from_flags(self);
771}
772
/* Helper scoped to hw_atl_b0_hw_packet_filter_set(): 1U if the given
 * IFF_* flag is present in "packet_filter", else 0U.
 */
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

/* Apply the net_device IFF_* filter flags to the L2 receive filter
 * block (promiscuous, multicast, all-multicast, broadcast), record the
 * multicast-list state in the NIC config, and enable/disable the
 * unicast overflow filters accordingly.
 */
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	unsigned int i = 0U;

	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);

	hw_atl_rpfl2_accept_all_mc_packets_set(self,
					       IS_FILTER_ENABLED(IFF_ALLMULTI));

	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

	/* Enable only the filters currently holding a list entry.
	 * NOTE(review): "i <= mc_list_count" presumes the list occupies
	 * indices HW_ATL_B0_MAC_MIN..mc_list_count, i.e. MAC_MIN == 1;
	 * confirm against hw_atl_b0_internal.h.
	 */
	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled &&
					   (i <= self->aq_nic_cfg->mc_list_count)) ?
					   1U : 0U, i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED
801
/* Load a multicast address list into the unicast filter bank starting
 * at HW_ATL_B0_MAC_MIN, updating aq_nic_cfg->mc_list_count as it goes.
 *
 * @ar_mac: array of up to AQ_CFG_MULTICAST_ADDRESS_MAX 6-byte addresses.
 * @count:  number of valid entries in @ar_mac.
 * Return: 0 on success, -EBADRQC if @count exceeds the filter capacity.
 */
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;

	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (self->aq_nic_cfg->mc_list_count = 0U;
	     self->aq_nic_cfg->mc_list_count < count;
	     ++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		/* Hardware keeps each address split: 16-bit MSW, 32-bit LSW */
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		/* Keep the filter disabled while it is rewritten */
		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
							l, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
							h, HW_ATL_B0_MAC_MIN + i);

		/* Re-enable only if multicast filtering is active */
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled),
					   HW_ATL_B0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
840
/* Program interrupt moderation for all rings from the NIC config.
 *
 * ON:   fixed min/max timers derived from the user's tx_itr/rx_itr
 *       (in microseconds; hardware granularity is 2us).
 * AUTO: per-link-speed timer tables; the user-visible tx_itr/rx_itr
 *       are updated to reflect the values actually programmed.
 * OFF:  moderation disabled, write-back interrupts fire immediately.
 *
 * The moderation control word packs min timer into bits [15:8] and
 * max timer into bits [23:16]; bit 1 (the initial value 2U) enables
 * the moderation logic.
 * NOTE(review): no default case -- other itr enum values fall through
 * leaving the hardware moderation state unchanged.
 */
static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		/* Moderated mode: disable per-writeback IRQs */
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			/* Clamp to the hardware timer field limits */
			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			/* AUTO: {min, max} timer pairs indexed by link speed */
			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},/* 10Gbit */
				{0xCU, 0x70U},/* 5Gbit */
				{0xCU, 0x70U},/* 5Gbit 5GS */
				{0x18U, 0xE0U},/* 2.5Gbit */
				{0x30U, 0x80U},/* 1Gbit */
				{0x4U, 0x50U},/* 100Mbit */
			};

			unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		/* Unmoderated: interrupt on every descriptor write-back */
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	/* Apply the same control word to every ring */
	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}
929
930static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
931{
932 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
933 return aq_hw_err_from_flags(self);
934}
935
936static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
937 struct aq_ring_s *ring)
938{
8e1c072f 939 hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
bab6de8f
DV
940 return aq_hw_err_from_flags(self);
941}
942
943static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
944 struct aq_ring_s *ring)
945{
8e1c072f 946 hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
bab6de8f
DV
947 return aq_hw_err_from_flags(self);
948}
949
950static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
951{
952 int err = 0;
953
954 err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
955 if (err < 0)
956 goto err_exit;
957
958err_exit:
959 return err;
960}
961
/* B0-specific implementation of the aq_hw_ops interface.  Firmware,
 * statistics, and register-dump callbacks are shared with the common
 * hw_atl_utils layer.
 */
const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
	.hw_get_link_status = hw_atl_utils_mpi_get_link_status,
	.hw_set_link_speed = hw_atl_b0_hw_set_speed,
	.hw_init = hw_atl_b0_hw_init,
	.hw_deinit = hw_atl_utils_hw_deinit,
	.hw_set_power = hw_atl_utils_hw_set_power,
	.hw_reset = hw_atl_b0_hw_reset,
	.hw_start = hw_atl_b0_hw_start,
	.hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable = hw_atl_b0_hw_irq_disable,
	.hw_irq_read = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
	.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs = hw_atl_utils_hw_get_regs,
	.hw_update_stats = hw_atl_utils_update_stats,
	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version = hw_atl_utils_get_fw_version,
};