// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 */

#include "net_driver.h"
#include "rx_common.h"
#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "nic.h"
#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV	7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels;

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

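/* The MC posts its warm boot count in ER_DZ_BIU_MC_SFT_STATUS: word 1 holds
 * the magic 0xb007 ("boot") while word 0 holds the count itself, so a read
 * without the magic means the MC is mid-reboot and we return -EIO.  This
 * interpretation of the magic value is a reading of the code above, not of
 * any separate hardware documentation.
 */
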
/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
		if (rc)
			return rc;
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}

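/* GET_CAPABILITIES above illustrates the usual MCDI versioning pattern: the
 * response buffer is declared at the newest size this driver understands
 * (V4) and the firmware's actual version is inferred from outlen.  For
 * example, a V1-only firmware returns just MC_CMD_GET_CAPABILITIES_OUT_LEN
 * bytes, so every V2+ field falls back to its default (datapath_caps2 = 0,
 * piobuf_size = ER_DZ_TX_PIOBUF_SIZE, the compiled-in vi_stride and
 * num_mac_stats).
 */
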
static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					 LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

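/* In the Huntington fallback above, GET_CLOCK reports the system clock
 * frequency and the timer quantum is the time taken by 1536 of those
 * cycles.  As a worked example (the MHz unit is inferred from the 1536000
 * scaling, not stated here): a reported sysclk of 800 would give
 * quantum = 1536000 / 800 = 1920 ns, and with a timer_period_max of N
 * quanta, timer_max_ns = N * 1920 ns.
 */
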
static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t link_control_flag_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t primary_flag_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

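/* These _show() handlers back the read-only sysfs attributes declared via
 * DEVICE_ATTR_RO() below and created against the PCI device in
 * efx_ef10_probe().  Reading e.g. the link_control_flag file under the PCI
 * device's sysfs directory (the exact path is implied by
 * device_create_file(), not spelled out in this file) prints "1" when this
 * function has link control and "0" otherwise.
 */
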
static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init.  8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

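/* Note the lock order in efx_ef10_add_vlan() above: vlan_lock is taken
 * first, then mac_lock, then filter_sem (write side).  The delete paths
 * below take vlan_lock and then filter_sem without mac_lock; this ordering
 * description is read off the code here, not from a separate locking
 * document.
 */
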
static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_mcdi_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature.  We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR_RO(link_control_flag);
static DEVICE_ATTR_RO(primary_flag);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);
	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
		nic_data->udp_tunnels[i].type =
			TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_get_pf_index(efx, &nic_data->pf_index);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need up to four
	 * TX queues per channel for different checksumming combinations.
	 */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
		efx->tx_queues_per_channel = 4;
	else
		efx->tx_queues_per_channel = 2;
	efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
	if (!efx->max_vis) {
		netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
		rc = -EIO;
		goto fail5;
	}
	efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
				  efx->max_vis / efx->tx_queues_per_channel);
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if the 8021q module is loaded,
	 * but we can't rely on that since the module may not be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) &&
	    efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED))
		efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

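/* The fail1..fail5 labels in efx_ef10_probe() above unwind strictly in
 * reverse order of setup, so each failure point jumps to the label that
 * tears down everything allocated before it; the teardown under fail3
 * additionally clears the UDP tunnel port table before MCDI is shut down.
 */
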
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio ||
		    channel->channel >= efx->xdp_channel_offset)
			continue;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

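/* Worked example for the channel->buffer mapping above, with illustrative
 * sizes (efx_piobuf_size = 256 and a 2048-byte PIO buffer are assumptions,
 * not values guaranteed here): each TX channel claims one 256-byte copy
 * window, assigned in reverse channel order, and all TX queues of a channel
 * share that window; eight consecutive channels then share one 2048-byte
 * buffer, with index selecting the buffer and offset the window within it.
 */
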
static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

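/* When EFX_USE_PIO is not defined, the stubs above keep every call site
 * unchanged: allocation requests for n > 0 fail with -ENOBUFS and the
 * link/free/forget operations become no-ops.
 */
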
static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_mcdi_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_mcdi_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

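/* Note that when the VADAPTOR_QUERY capability is absent,
 * efx_ef10_vadaptor_query() skips the MCDI call and reports the zeroes left
 * in outbuf by MCDI_DECLARE_BUF() (assuming that macro zero-initialises the
 * buffer, which the error-free fallthrough here appears to rely on).
 */
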
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, const u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, const u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver was unloaded while any VF was assigned to a guest
	 * (using Xen, only).
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
				  &nic_data->n_allocated_vis);
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
				     efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
			   efx->tx_queues_per_channel) +
			  efx->n_xdp_channels * efx->xdp_tx_per_channel);
	if (efx->max_vis && efx->max_vis < channel_vis) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Reducing channel VIs from %u to %u\n",
			  channel_vis, efx->max_vis);
		channel_vis = efx->max_vis;
	}

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI.  Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_mcdi_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / efx->tx_queues_per_channel;

		efx_mcdi_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

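/* Worked example for the UC/WC split above, under assumed values (4K host
 * pages, vi_stride = 8192, channel_vis = 32, n_piobufs = 2): the UC mapping
 * covers the first 31 complete VIs plus the start of the next VI up to the
 * PIO window, pio_write_vi_base = uc_mem_map_size / 8192 = 31, and the WC
 * mapping then spans VIs 31 and 32 so that the two PIO buffers can be
 * written through write-combined pages.
 */
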
static void efx_ef10_fini_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	kfree(nic_data->mc_stats);
	nic_data->mc_stats = NULL;
	spin_unlock_bh(&efx->stats_lock);
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct net_device *net_dev = efx->net_dev;
	netdev_features_t tun_feats, tso_feats;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (efx->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		efx->must_realloc_vis = false;
	}

	nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64),
				     GFP_KERNEL);
	if (!nic_data->mc_stats)
		return -ENOMEM;

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* encap features might change during reset if fw variant changed */
	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
		net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
		    NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
	tso_feats = NETIF_F_TSO | NETIF_F_TSO6;

	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
		/* If this is first nic_init, or if it is a reset and a new fw
		 * variant has added new features, enable them by default.
		 * If the features are not new, maintain their current value.
		 */
		if (!(net_dev->hw_features & tun_feats))
			net_dev->features |= tun_feats;
		net_dev->hw_enc_features |= tun_feats | tso_feats;
		net_dev->hw_features |= tun_feats;
	} else {
		net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
		net_dev->hw_features &= ~tun_feats;
		net_dev->features &= ~tun_feats;
	}

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	efx->must_realloc_vis = true;
	efx_mcdi_filter_table_reset_mc_allocations(efx);
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

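/* efx_ef10_map_reset_flags() consumes ethtool reset component flags
 * (ETH_RESET_* shifted into the shared position): if the full EF10_RESET_MC
 * set is present it is translated to RESET_TYPE_WORLD, a plain MAC+PHY
 * request becomes RESET_TYPE_ALL, and anything less is rejected.  For
 * example, a user invoking ethtool's reset operation with all components
 * selected would exercise the first branch (that usage example is
 * illustrative, not taken from this file).
 */
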
static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_table_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }

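/* Each entry below pairs the name reported by "ethtool -S" with the
 * location of the counter in the MC statistics DMA buffer: reading the
 * macros above, the stats appear to be laid out as consecutive 64-bit
 * words, so 8 * MC_CMD_MAC_<index> gives the byte offset of a stat and 64
 * its DMA width in bits; this is a description of the macros, not of a
 * separate format specification.
 */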
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
	EFX_GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1548 | EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), | |
1549 | EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), | |
1550 | EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), | |
1551 | EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), | |
1552 | EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), | |
1553 | EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), | |
f411b54d EC |
1554 | EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), |
1555 | EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), | |
1556 | EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), | |
1557 | EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), | |
1558 | EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), | |
1559 | EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), | |
2c0b6ee8 BK |
1560 | EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), |
1561 | EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), | |
1562 | EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), | |
1563 | EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), | |
1564 | EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), | |
1565 | EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), | |
1566 | EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), | |
1567 | EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), | |
1568 | EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), | |
1569 | EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), | |
1570 | EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), | |
1571 | EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), | |
1572 | EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), | |
1573 | EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), | |
1574 | EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), | |
1575 | EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), | |
8127d661 BH |
1576 | }; |
1577 | ||
e80ca013 DP |
1578 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ |
1579 | (1ULL << EF10_STAT_port_tx_packets) | \ | |
1580 | (1ULL << EF10_STAT_port_tx_pause) | \ | |
1581 | (1ULL << EF10_STAT_port_tx_unicast) | \ | |
1582 | (1ULL << EF10_STAT_port_tx_multicast) | \ | |
1583 | (1ULL << EF10_STAT_port_tx_broadcast) | \ | |
1584 | (1ULL << EF10_STAT_port_rx_bytes) | \ | |
1585 | (1ULL << \ | |
1586 | EF10_STAT_port_rx_bytes_minus_good_bytes) | \ | |
1587 | (1ULL << EF10_STAT_port_rx_good_bytes) | \ | |
1588 | (1ULL << EF10_STAT_port_rx_bad_bytes) | \ | |
1589 | (1ULL << EF10_STAT_port_rx_packets) | \ | |
1590 | (1ULL << EF10_STAT_port_rx_good) | \ | |
1591 | (1ULL << EF10_STAT_port_rx_bad) | \ | |
1592 | (1ULL << EF10_STAT_port_rx_pause) | \ | |
1593 | (1ULL << EF10_STAT_port_rx_control) | \ | |
1594 | (1ULL << EF10_STAT_port_rx_unicast) | \ | |
1595 | (1ULL << EF10_STAT_port_rx_multicast) | \ | |
1596 | (1ULL << EF10_STAT_port_rx_broadcast) | \ | |
1597 | (1ULL << EF10_STAT_port_rx_lt64) | \ | |
1598 | (1ULL << EF10_STAT_port_rx_64) | \ | |
1599 | (1ULL << EF10_STAT_port_rx_65_to_127) | \ | |
1600 | (1ULL << EF10_STAT_port_rx_128_to_255) | \ | |
1601 | (1ULL << EF10_STAT_port_rx_256_to_511) | \ | |
1602 | (1ULL << EF10_STAT_port_rx_512_to_1023) |\ | |
1603 | (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ | |
1604 | (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ | |
1605 | (1ULL << EF10_STAT_port_rx_gtjumbo) | \ | |
1606 | (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ | |
1607 | (1ULL << EF10_STAT_port_rx_overflow) | \ | |
1608 | (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ | |
e4d112e4 EC |
1609 | (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ |
1610 | (1ULL << GENERIC_STAT_rx_noskb_drops)) | |
8127d661 | 1611 | |
69b365c3 EC |
1612 | /* On 7000 series NICs, these statistics are only provided by the 10G MAC. |
1613 | * For a 10G/40G switchable port we do not expose these because they might | |
1614 | * not include all the packets they should. | |
1615 | * On 8000 series NICs these statistics are always provided. | |
8127d661 | 1616 | */ |
e80ca013 DP |
1617 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ |
1618 | (1ULL << EF10_STAT_port_tx_lt64) | \ | |
1619 | (1ULL << EF10_STAT_port_tx_64) | \ | |
1620 | (1ULL << EF10_STAT_port_tx_65_to_127) |\ | |
1621 | (1ULL << EF10_STAT_port_tx_128_to_255) |\ | |
1622 | (1ULL << EF10_STAT_port_tx_256_to_511) |\ | |
1623 | (1ULL << EF10_STAT_port_tx_512_to_1023) |\ | |
1624 | (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ | |
1625 | (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) | |
8127d661 BH |
1626 | |
1627 | /* These statistics are only provided by the 40G MAC. For a 10G/40G | |
1628 | * switchable port we do expose these because the errors will otherwise | |
1629 | * be silent. | |
1630 | */ | |
e80ca013 DP |
1631 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ |
1632 | (1ULL << EF10_STAT_port_rx_length_error)) | |
8127d661 | 1633 | |
568d7a00 EC |
1634 | /* These statistics are only provided if the firmware supports the |
1635 | * capability PM_AND_RXDP_COUNTERS. | |
1636 | */ | |
1637 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ | |
e80ca013 DP |
1638 | (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ |
1639 | (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ | |
1640 | (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ | |
1641 | (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ | |
1642 | (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ | |
1643 | (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ | |
1644 | (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ | |
1645 | (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ | |
1646 | (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ | |
1647 | (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ | |
1648 | (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ | |
1649 | (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) | |
568d7a00 | 1650 | |
f411b54d EC |
1651 | /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, |
1652 | * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in | |
1653 | * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. | |
1654 | * These bits are in the second u64 of the raw mask. | |
1655 | */ | |
1656 | #define EF10_FEC_STAT_MASK ( \ | |
1657 | (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ | |
1658 | (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ | |
1659 | (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ | |
1660 | (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ | |
1661 | (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ | |
1662 | (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) | |
1663 | ||
2c0b6ee8 BK |
1664 | /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, |
1665 | * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in | |
1666 | * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. | |
1667 | * These bits are in the second u64 of the raw mask. | |
1668 | */ | |
1669 | #define EF10_CTPIO_STAT_MASK ( \ | |
2c0b6ee8 BK |
1670 | (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ |
1671 | (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ | |
1672 | (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ | |
1673 | (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ | |
1674 | (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ | |
1675 | (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ | |
1676 | (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ | |
1677 | (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ | |
1678 | (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ | |
1679 | (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ | |
1680 | (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ | |
1681 | (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ | |
1682 | (1ULL << (EF10_STAT_ctpio_success - 64)) | \ | |
1683 | (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ | |
1684 | (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ | |
1685 | (1ULL << (EF10_STAT_ctpio_erase - 64))) | |
1686 | ||
4bae913b | 1687 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
8127d661 | 1688 | { |
4bae913b | 1689 | u64 raw_mask = HUNT_COMMON_STAT_MASK; |
8127d661 | 1690 | u32 port_caps = efx_mcdi_phy_get_caps(efx); |
568d7a00 | 1691 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
8127d661 | 1692 | |
3c36a2ad DP |
1693 | if (!(efx->mcdi->fn_flags & |
1694 | 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) | |
1695 | return 0; | |
1696 | ||
69b365c3 | 1697 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { |
4bae913b | 1698 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
69b365c3 EC |
1699 | /* 8000 series have everything even at 40G */ |
1700 | if (nic_data->datapath_caps2 & | |
1701 | (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) | |
1702 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; | |
1703 | } else { | |
4bae913b | 1704 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
69b365c3 | 1705 | } |
568d7a00 EC |
1706 | |
1707 | if (nic_data->datapath_caps & | |
1708 | (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) | |
1709 | raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; | |
1710 | ||
4bae913b EC |
1711 | return raw_mask; |
1712 | } | |
1713 | ||
1714 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) | |
1715 | { | |
d94619cd | 1716 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
3c36a2ad DP |
1717 | u64 raw_mask[2]; |
1718 | ||
1719 | raw_mask[0] = efx_ef10_raw_stat_mask(efx); | |
1720 | ||
d94619cd DP |
1721 | /* Only show vadaptor stats when EVB capability is present */ |
1722 | if (nic_data->datapath_caps & | |
1723 | (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { | |
1724 | raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); | |
f411b54d | 1725 | raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; |
d94619cd DP |
1726 | } else { |
1727 | raw_mask[1] = 0; | |
1728 | } | |
f411b54d EC |
1729 | /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ |
1730 | if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) | |
1731 | raw_mask[1] |= EF10_FEC_STAT_MASK; | |
4bae913b | 1732 | |
2c0b6ee8 BK |
1733 | /* CTPIO stats appear in V3. Only show them on devices that actually |
1734 | * support CTPIO. Although this driver doesn't use CTPIO others might, | |
1735 | * and we may be reporting the stats for the underlying port. | |
1736 | */ | |
1737 | if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && | |
1738 | (nic_data->datapath_caps2 & | |
1739 | (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) | |
1740 | raw_mask[1] |= EF10_CTPIO_STAT_MASK; | |
1741 | ||
4bae913b | 1742 | #if BITS_PER_LONG == 64 |
e70c70c3 | 1743 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); |
3c36a2ad DP |
1744 | mask[0] = raw_mask[0]; |
1745 | mask[1] = raw_mask[1]; | |
4bae913b | 1746 | #else |
e70c70c3 | 1747 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); |
3c36a2ad DP |
1748 | mask[0] = raw_mask[0] & 0xffffffff; |
1749 | mask[1] = raw_mask[0] >> 32; | |
1750 | mask[2] = raw_mask[1] & 0xffffffff; | |
4bae913b | 1751 | #endif |
8127d661 BH |
1752 | } |
1753 | ||
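The #if above packs the same 128-bit raw mask into unsigned longs either way; only the word size differs. A standalone sketch of the 32-bit branch (three words suffice because EF10_STAT_COUNT is below 96):

```c
#include <stdint.h>
#include <stdio.h>

/* Split a two-u64 raw mask into 32-bit bitmap words, mirroring the
 * BITS_PER_LONG == 32 branch above.
 */
int main(void)
{
	uint64_t raw_mask[2] = { 0x00000001deadbeefULL, 0x3fULL };
	uint32_t mask[3];

	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
	printf("%#x %#x %#x\n", mask[0], mask[1], mask[2]);
	return 0;
}
```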
9dae5921 | 1754 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 **names) |
8127d661 | 1755 | { |
4bae913b EC |
1756 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
1757 | ||
1758 | efx_ef10_get_stat_mask(efx, mask); | |
8127d661 | 1759 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, |
4bae913b | 1760 | mask, names); |
8127d661 BH |
1761 | } |
1762 | ||
cab351be JK |
1763 | static void efx_ef10_get_fec_stats(struct efx_nic *efx, |
1764 | struct ethtool_fec_stats *fec_stats) | |
1765 | { | |
1766 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | |
1767 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1768 | u64 *stats = nic_data->stats; | |
1769 | ||
1770 | efx_ef10_get_stat_mask(efx, mask); | |
1771 | if (test_bit(EF10_STAT_fec_corrected_errors, mask)) | |
1772 | fec_stats->corrected_blocks.total = | |
1773 | stats[EF10_STAT_fec_corrected_errors]; | |
1774 | if (test_bit(EF10_STAT_fec_uncorrected_errors, mask)) | |
1775 | fec_stats->uncorrectable_blocks.total = | |
1776 | stats[EF10_STAT_fec_uncorrected_errors]; | |
1777 | } | |
1778 | ||
d7788196 DP |
1779 | static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, |
1780 | struct rtnl_link_stats64 *core_stats) | |
1781 | { | |
1782 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | |
1783 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1784 | u64 *stats = nic_data->stats; | |
1785 | size_t stats_count = 0, index; | |
1786 | ||
1787 | efx_ef10_get_stat_mask(efx, mask); | |
1788 | ||
1789 | if (full_stats) { | |
1790 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | |
1791 | if (efx_ef10_stat_desc[index].name) { | |
1792 | *full_stats++ = stats[index]; | |
1793 | ++stats_count; | |
1794 | } | |
1795 | } | |
1796 | } | |
1797 | ||
fbe4307e BK |
1798 | if (!core_stats) |
1799 | return stats_count; | |
1800 | ||
1801 | if (nic_data->datapath_caps & | |
1802 | 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { | |
1803 | /* Use vadaptor stats. */ | |
0fc95fca DP |
1804 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + |
1805 | stats[EF10_STAT_rx_multicast] + | |
1806 | stats[EF10_STAT_rx_broadcast]; | |
1807 | core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + | |
1808 | stats[EF10_STAT_tx_multicast] + | |
1809 | stats[EF10_STAT_tx_broadcast]; | |
1810 | core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + | |
1811 | stats[EF10_STAT_rx_multicast_bytes] + | |
1812 | stats[EF10_STAT_rx_broadcast_bytes]; | |
1813 | core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + | |
1814 | stats[EF10_STAT_tx_multicast_bytes] + | |
1815 | stats[EF10_STAT_tx_broadcast_bytes]; | |
1816 | core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + | |
d7788196 | 1817 | stats[GENERIC_STAT_rx_noskb_drops]; |
0fc95fca DP |
1818 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; |
1819 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | |
1820 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | |
1821 | core_stats->rx_errors = core_stats->rx_crc_errors; | |
1822 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; | |
fbe4307e BK |
1823 | } else { |
1824 | /* Use port stats. */ | |
1825 | core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; | |
1826 | core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; | |
1827 | core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; | |
1828 | core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; | |
1829 | core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + | |
1830 | stats[GENERIC_STAT_rx_nodesc_trunc] + | |
1831 | stats[GENERIC_STAT_rx_noskb_drops]; | |
1832 | core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; | |
1833 | core_stats->rx_length_errors = | |
1834 | stats[EF10_STAT_port_rx_gtjumbo] + | |
1835 | stats[EF10_STAT_port_rx_length_error]; | |
1836 | core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; | |
1837 | core_stats->rx_frame_errors = | |
1838 | stats[EF10_STAT_port_rx_align_error]; | |
1839 | core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; | |
1840 | core_stats->rx_errors = (core_stats->rx_length_errors + | |
1841 | core_stats->rx_crc_errors + | |
1842 | core_stats->rx_frame_errors); | |
d7788196 DP |
1843 | } |
1844 | ||
1845 | return stats_count; | |
1846 | } | |
1847 | ||
d3142c19 EC |
1848 | static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, |
1849 | struct rtnl_link_stats64 *core_stats) | |
8127d661 BH |
1850 | { |
1851 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
4bae913b | 1852 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
8127d661 | 1853 | u64 *stats = nic_data->stats; |
8127d661 | 1854 | |
4bae913b EC |
1855 | efx_ef10_get_stat_mask(efx, mask); |
1856 | ||
d1b35543 EC |
1857 | /* If NIC was fini'd (probably resetting), then we can't read |
1858 | * updated stats right now. | |
1859 | */ | |
1860 | if (nic_data->mc_stats) { | |
1861 | efx_nic_copy_stats(efx, nic_data->mc_stats); | |
1862 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, | |
1863 | mask, stats, nic_data->mc_stats, false); | |
1864 | } | |
8127d661 BH |
1865 | |
1866 | /* Update derived statistics */ | |
e80ca013 DP |
1867 | efx_nic_fix_nodesc_drop_stat(efx, |
1868 | &stats[EF10_STAT_port_rx_nodesc_drops]); | |
d3142c19 EC |
1869 | /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. |
1870 | * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. | |
1871 | * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. | |
1872 | * Here we calculate port_rx_good_bytes. | |
1873 | */ | |
e80ca013 DP |
1874 | stats[EF10_STAT_port_rx_good_bytes] = |
1875 | stats[EF10_STAT_port_rx_bytes] - | |
1876 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; | |
d3142c19 EC |
1877 | |
1878 | /* The asynchronous reads used to calculate RX_BAD_BYTES in | |
1879 | * MC Firmware are done such that we should not see an increase in | |
1880 | * RX_BAD_BYTES when a good packet has arrived. Unfortunately this | |
1881 | * does mean that the stat can decrease at times. Here we do not | |
1882 | * update the stat unless it has increased or has gone to zero | |
1883 | * (in the case of the NIC rebooting). | |
1884 | * Please see Bug 33781 for a discussion of why things work this way. | |
1885 | */ | |
e80ca013 DP |
1886 | efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], |
1887 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); | |
e4d112e4 | 1888 | efx_update_sw_stats(efx, stats); |
8127d661 | 1889 | |
d7788196 DP |
1890 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
1891 | } | |
8127d661 | 1892 | |
d7788196 | 1893 | static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) |
efd7ed0f | 1894 | __must_hold(&efx->stats_lock) |
d7788196 DP |
1895 | { |
1896 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); | |
1897 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1898 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | |
1899 | __le64 generation_start, generation_end; | |
1900 | u64 *stats = nic_data->stats; | |
c1be4821 | 1901 | u32 dma_len = efx->num_mac_stats * sizeof(u64); |
d7788196 DP |
1902 | struct efx_buffer stats_buf; |
1903 | __le64 *dma_stats; | |
1904 | int rc; | |
1905 | ||
f00bf230 DP |
1906 | spin_unlock_bh(&efx->stats_lock); |
1907 | ||
d7788196 DP |
1908 | efx_ef10_get_stat_mask(efx, mask); |
1909 | ||
caa241f0 | 1910 | rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL); |
f00bf230 DP |
1911 | if (rc) { |
1912 | spin_lock_bh(&efx->stats_lock); | |
d7788196 | 1913 | return rc; |
f00bf230 | 1914 | } |
d7788196 DP |
1915 | |
1916 | dma_stats = stats_buf.addr; | |
c1be4821 | 1917 | dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; |
d7788196 DP |
1918 | |
1919 | MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); | |
1920 | MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, | |
0fc95fca | 1921 | MAC_STATS_IN_DMA, 1); |
d7788196 DP |
1922 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
1923 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1924 | ||
6dd4859b DP |
1925 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
1926 | NULL, 0, NULL); | |
d7788196 | 1927 | spin_lock_bh(&efx->stats_lock); |
6dd4859b DP |
1928 | if (rc) { |
1929 | /* Expect ENOENT if DMA queues have not been set up */ | |
1930 | if (rc != -ENOENT || atomic_read(&efx->active_queues)) | |
1931 | efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, | |
1932 | sizeof(inbuf), NULL, 0, rc); | |
d7788196 | 1933 | goto out; |
6dd4859b | 1934 | } |
d7788196 | 1935 | |
c1be4821 | 1936 | generation_end = dma_stats[efx->num_mac_stats - 1]; |
0fc95fca DP |
1937 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { |
1938 | WARN_ON_ONCE(1); | |
d7788196 | 1939 | goto out; |
0fc95fca | 1940 | } |
d7788196 DP |
1941 | rmb(); |
1942 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, | |
1943 | stats, stats_buf.addr, false); | |
1944 | rmb(); | |
1945 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | |
1946 | if (generation_end != generation_start) { | |
1947 | rc = -EAGAIN; | |
1948 | goto out; | |
8127d661 BH |
1949 | } |
1950 | ||
d7788196 DP |
1951 | efx_update_sw_stats(efx, stats); |
1952 | out: | |
ada74c55 ÍH |
1953 | /* releasing a DMA coherent buffer with BH disabled can panic */ |
1954 | spin_unlock_bh(&efx->stats_lock); | |
d7788196 | 1955 | efx_nic_free_buffer(efx, &stats_buf); |
ada74c55 | 1956 | spin_lock_bh(&efx->stats_lock); |
d7788196 DP |
1957 | return rc; |
1958 | } | |
1959 | ||
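The two generation words act like a seqcount around the DMA'd snapshot: firmware writes the end word last, so reading the end word, copying the stats, and then comparing against the start word detects a torn update, which the caller retries via -EAGAIN. A user-space sketch of the same pattern (names, the invalid marker, and the start-word index are illustrative, not the MCDI layout):

```c
#include <stdint.h>
#include <string.h>

/* Generation-bracketed snapshot read; returns -1 if the copy was torn. */
static int read_snapshot(const volatile uint64_t *dma, size_t nstats,
			 uint64_t *out)
{
	uint64_t gen_end = dma[nstats - 1];	/* written last by firmware */

	if (gen_end == ~0ULL)			/* "generation invalid" marker */
		return -1;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* rmb() analogue */
	memcpy(out, (const void *)dma, nstats * sizeof(*out));
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	return dma[0] == gen_end ? 0 : -1;	/* start word assumed at [0] */
}
```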
1960 | static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, | |
1961 | struct rtnl_link_stats64 *core_stats) | |
1962 | { | |
1963 | if (efx_ef10_try_update_nic_stats_vf(efx)) | |
1964 | return 0; | |
1965 | ||
1966 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); | |
8127d661 BH |
1967 | } |
1968 | ||
623b9988 EC |
1969 | static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats, |
1970 | struct rtnl_link_stats64 *core_stats) | |
1971 | { | |
1972 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1973 | ||
1974 | /* In atomic context, cannot update HW stats. Just update the | |
1975 | * software stats and return so the caller can continue. | |
1976 | */ | |
1977 | efx_update_sw_stats(efx, nic_data->stats); | |
1978 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); | |
1979 | } | |
1980 | ||
8127d661 BH |
1981 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) |
1982 | { | |
1983 | struct efx_nic *efx = channel->efx; | |
539de7c5 | 1984 | unsigned int mode, usecs; |
8127d661 BH |
1985 | efx_dword_t timer_cmd; |
1986 | ||
539de7c5 | 1987 | if (channel->irq_moderation_us) { |
8127d661 | 1988 | mode = 3; |
539de7c5 | 1989 | usecs = channel->irq_moderation_us; |
8127d661 BH |
1990 | } else { |
1991 | mode = 0; | |
539de7c5 | 1992 | usecs = 0; |
8127d661 BH |
1993 | } |
1994 | ||
539de7c5 BK |
1995 | if (EFX_EF10_WORKAROUND_61265(efx)) { |
1996 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); | |
1997 | unsigned int ns = usecs * 1000; | |
1998 | ||
1999 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, | |
2000 | channel->channel); | |
2001 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); | |
2002 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); | |
2003 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); | |
2004 | ||
2005 | efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, | |
2006 | inbuf, sizeof(inbuf), 0, NULL, 0); | |
2007 | } else if (EFX_EF10_WORKAROUND_35388(efx)) { | |
2008 | unsigned int ticks = efx_usecs_to_ticks(efx, usecs); | |
2009 | ||
8127d661 BH |
2010 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, |
2011 | EFE_DD_EVQ_IND_TIMER_FLAGS, | |
2012 | ERF_DD_EVQ_IND_TIMER_MODE, mode, | |
539de7c5 | 2013 | ERF_DD_EVQ_IND_TIMER_VAL, ticks); |
8127d661 BH |
2014 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, |
2015 | channel->channel); | |
2016 | } else { | |
539de7c5 BK |
2017 | unsigned int ticks = efx_usecs_to_ticks(efx, usecs); |
2018 | ||
0bc959a9 BK |
2019 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, |
2020 | ERF_DZ_TC_TIMER_VAL, ticks, | |
2021 | ERF_FZ_TC_TMR_REL_VAL, ticks); | |
8127d661 BH |
2022 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, |
2023 | channel->channel); | |
2024 | } | |
2025 | } | |
2026 | ||
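efx_usecs_to_ticks() is shared driver code; a plausible shape for the conversion (an assumption, not the exact implementation) is a straight divide by the NIC's event-timer quantum:

```c
/* Sketch of the usecs -> EVQ timer-tick conversion (assumed shape). */
static unsigned int usecs_to_ticks(unsigned int timer_quantum_ns,
				   unsigned int usecs)
{
	return usecs * 1000 / timer_quantum_ns;	/* e.g. 100us / 6144ns -> 16 */
}
```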
02246a7f SS |
2027 | static void efx_ef10_get_wol_vf(struct efx_nic *efx, |
2028 | struct ethtool_wolinfo *wol) {} | |
2029 | ||
2030 | static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) | |
2031 | { | |
2032 | return -EOPNOTSUPP; | |
2033 | } | |
2034 | ||
8127d661 BH |
2035 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
2036 | { | |
2037 | wol->supported = 0; | |
2038 | wol->wolopts = 0; | |
2039 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | |
2040 | } | |
2041 | ||
2042 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) | |
2043 | { | |
2044 | if (type != 0) | |
2045 | return -EINVAL; | |
2046 | return 0; | |
2047 | } | |
2048 | ||
2049 | static void efx_ef10_mcdi_request(struct efx_nic *efx, | |
2050 | const efx_dword_t *hdr, size_t hdr_len, | |
2051 | const efx_dword_t *sdu, size_t sdu_len) | |
2052 | { | |
2053 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2054 | u8 *pdu = nic_data->mcdi_buf.addr; | |
2055 | ||
2056 | memcpy(pdu, hdr, hdr_len); | |
2057 | memcpy(pdu + hdr_len, sdu, sdu_len); | |
2058 | wmb(); | |
2059 | ||
2060 | /* The hardware provides 'low' and 'high' (doorbell) registers | |
2061 | * for passing the 64-bit address of an MCDI request to | |
2062 | * firmware. However the dwords are swapped by firmware. The | |
2063 | * least significant bits of the doorbell are then 0 for all | |
2064 | * MCDI requests due to alignment. | |
2065 | */ | |
2066 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), | |
2067 | ER_DZ_MC_DB_LWRD); | |
2068 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), | |
2069 | ER_DZ_MC_DB_HWRD); | |
2070 | } | |
2071 | ||
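The two _efx_writed() calls above are deliberately crossed: because firmware swaps the dwords, the high half of the DMA address goes to the LWRD register and the low half to the HWRD (doorbell) register. A standalone illustration of the split, with an arbitrary example address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x0000001234567000ULL;	/* example address */
	uint32_t to_lwrd = dma_addr >> 32;	/* high half -> LWRD */
	uint32_t to_hwrd = dma_addr;		/* low half  -> HWRD */

	/* buffer alignment keeps the low doorbell bits zero */
	printf("LWRD=%#x HWRD=%#x\n", to_lwrd, to_hwrd);
	return 0;
}
```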
2072 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) | |
2073 | { | |
2074 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2075 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; | |
2076 | ||
2077 | rmb(); | |
2078 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); | |
2079 | } | |
2080 | ||
2081 | static void | |
2082 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, | |
2083 | size_t offset, size_t outlen) | |
2084 | { | |
2085 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2086 | const u8 *pdu = nic_data->mcdi_buf.addr; | |
2087 | ||
2088 | memcpy(outbuf, pdu + offset, outlen); | |
2089 | } | |
2090 | ||
c577e59e DP |
2091 | static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) |
2092 | { | |
2093 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2094 | ||
2095 | /* All our allocations have been reset */ | |
90c914d2 | 2096 | efx_ef10_table_reset_mc_allocations(efx); |
c577e59e DP |
2097 | |
2098 | /* The datapath firmware might have been changed */ | |
2099 | nic_data->must_check_datapath_caps = true; | |
2100 | ||
2101 | /* MAC statistics have been cleared on the NIC; clear the local | |
2102 | * statistic that we update with efx_update_diff_stat(). | |
2103 | */ | |
2104 | nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; | |
2105 | } | |
2106 | ||
8127d661 BH |
2107 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) |
2108 | { | |
2109 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2110 | int rc; | |
2111 | ||
2112 | rc = efx_ef10_get_warm_boot_count(efx); | |
2113 | if (rc < 0) { | |
2114 | /* The firmware is presumably in the process of | |
2115 | * rebooting. However, we are supposed to report each | |
2116 | * reboot just once, so we must only do that once we | |
2117 | * can read and store the updated warm boot count. | |
2118 | */ | |
2119 | return 0; | |
2120 | } | |
2121 | ||
2122 | if (rc == nic_data->warm_boot_count) | |
2123 | return 0; | |
2124 | ||
2125 | nic_data->warm_boot_count = rc; | |
c577e59e | 2126 | efx_ef10_mcdi_reboot_detected(efx); |
869070c5 | 2127 | |
8127d661 BH |
2128 | return -EIO; |
2129 | } | |
2130 | ||
2131 | /* Handle an MSI interrupt | |
2132 | * | |
2133 | * Handle an MSI hardware interrupt. This routine schedules event | |
2134 | * queue processing. No interrupt acknowledgement cycle is necessary. | |
2135 | * Also, we never need to check that the interrupt is for us, since | |
2136 | * MSI interrupts cannot be shared. | |
2137 | */ | |
2138 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |
2139 | { | |
2140 | struct efx_msi_context *context = dev_id; | |
2141 | struct efx_nic *efx = context->efx; | |
2142 | ||
2143 | netif_vdbg(efx, intr, efx->net_dev, | |
2144 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | |
2145 | ||
6aa7de05 | 2146 | if (likely(READ_ONCE(efx->irq_soft_enabled))) { |
8127d661 BH |
2147 | /* Note test interrupts */ |
2148 | if (context->index == efx->irq_level) | |
2149 | efx->last_irq_cpu = raw_smp_processor_id(); | |
2150 | ||
2151 | /* Schedule processing of the channel */ | |
2152 | efx_schedule_channel_irq(efx->channel[context->index]); | |
2153 | } | |
2154 | ||
2155 | return IRQ_HANDLED; | |
2156 | } | |
2157 | ||
2158 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | |
2159 | { | |
2160 | struct efx_nic *efx = dev_id; | |
6aa7de05 | 2161 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
8127d661 BH |
2162 | struct efx_channel *channel; |
2163 | efx_dword_t reg; | |
2164 | u32 queues; | |
2165 | ||
2166 | /* Read the ISR which also ACKs the interrupts */ | |
2167 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); | |
2168 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); | |
2169 | ||
2170 | if (queues == 0) | |
2171 | return IRQ_NONE; | |
2172 | ||
2173 | if (likely(soft_enabled)) { | |
2174 | /* Note test interrupts */ | |
2175 | if (queues & (1U << efx->irq_level)) | |
2176 | efx->last_irq_cpu = raw_smp_processor_id(); | |
2177 | ||
2178 | efx_for_each_channel(channel, efx) { | |
2179 | if (queues & 1) | |
2180 | efx_schedule_channel_irq(channel); | |
2181 | queues >>= 1; | |
2182 | } | |
2183 | } | |
2184 | ||
2185 | netif_vdbg(efx, intr, efx->net_dev, | |
2186 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | |
2187 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | |
2188 | ||
2189 | return IRQ_HANDLED; | |
2190 | } | |
2191 | ||
942e298e | 2192 | static int efx_ef10_irq_test_generate(struct efx_nic *efx) |
8127d661 BH |
2193 | { |
2194 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); | |
2195 | ||
942e298e JC |
2196 | if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, |
2197 | NULL) == 0) | |
2198 | return -ENOTSUPP; | |
2199 | ||
8127d661 BH |
2200 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); |
2201 | ||
2202 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); | |
942e298e | 2203 | return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, |
8127d661 BH |
2204 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
2205 | } | |
2206 | ||
2207 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) | |
2208 | { | |
85d43fdb EC |
2209 | /* low two bits of label are what we want for type */ |
2210 | BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3); | |
2211 | tx_queue->type = tx_queue->label & 3; | |
d73e7715 | 2212 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, |
8127d661 BH |
2213 | (tx_queue->ptr_mask + 1) * |
2214 | sizeof(efx_qword_t), | |
2215 | GFP_KERNEL); | |
2216 | } | |
2217 | ||
2218 | /* This writes to the TX_DESC_WPTR and also pushes data */ | |
2219 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, | |
2220 | const efx_qword_t *txd) | |
2221 | { | |
2222 | unsigned int write_ptr; | |
2223 | efx_oword_t reg; | |
2224 | ||
2225 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
2226 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); | |
2227 | reg.qword[0] = *txd; | |
2228 | efx_writeo_page(tx_queue->efx, ®, | |
2229 | ER_DZ_TX_DESC_UPD, tx_queue->queue); | |
2230 | } | |
2231 | ||
e9117e50 BK |
2232 | /* Add Firmware-Assisted TSO v2 option descriptors to a queue. |
2233 | */ | |
1679c72c EC |
2234 | int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, |
2235 | bool *data_mapped) | |
e9117e50 BK |
2236 | { |
2237 | struct efx_tx_buffer *buffer; | |
0ce8df66 EC |
2238 | u16 inner_ipv4_id = 0; |
2239 | u16 outer_ipv4_id = 0; | |
e9117e50 BK |
2240 | struct tcphdr *tcp; |
2241 | struct iphdr *ip; | |
0ce8df66 | 2242 | u16 ip_tot_len; |
e9117e50 BK |
2243 | u32 seqnum; |
2244 | u32 mss; | |
2245 | ||
e01b16a7 | 2246 | EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); |
e9117e50 BK |
2247 | |
2248 | mss = skb_shinfo(skb)->gso_size; | |
2249 | ||
2250 | if (unlikely(mss < 4)) { | |
2251 | WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); | |
2252 | return -EINVAL; | |
2253 | } | |
2254 | ||
0ce8df66 EC |
2255 | if (skb->encapsulation) { |
2256 | if (!tx_queue->tso_encap) | |
2257 | return -EINVAL; | |
2258 | ip = ip_hdr(skb); | |
2259 | if (ip->version == 4) | |
2260 | outer_ipv4_id = ntohs(ip->id); | |
2261 | ||
2262 | ip = inner_ip_hdr(skb); | |
2263 | tcp = inner_tcp_hdr(skb); | |
2264 | } else { | |
2265 | ip = ip_hdr(skb); | |
2266 | tcp = tcp_hdr(skb); | |
2267 | } | |
2268 | ||
2269 | /* 8000-series EF10 hardware requires that IP Total Length be | |
2270 | * greater than or equal to the value it will have in each segment | |
2271 | * (which is at most mss + 208 + TCP header length), but also less | |
2272 | * than (0x10000 - inner_network_header). Otherwise the TCP | |
2273 | * checksum calculation will be broken for encapsulated packets. | |
2274 | * We fill in ip->tot_len with 0xff30, which should satisfy the | |
2275 | * first requirement unless the MSS is ridiculously large (which | |
2276 | * should be impossible as the driver max MTU is 9216); it is | |
2277 | * guaranteed to satisfy the second as we only attempt TSO if | |
2278 | * inner_network_header <= 208. | |
2279 | */ | |
b8ff3395 | 2280 | ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN; |
0ce8df66 EC |
2281 | EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN + |
2282 | (tcp->doff << 2u) > ip_tot_len); | |
2283 | ||
e9117e50 | 2284 | if (ip->version == 4) { |
0ce8df66 | 2285 | ip->tot_len = htons(ip_tot_len); |
e9117e50 | 2286 | ip->check = 0; |
0ce8df66 | 2287 | inner_ipv4_id = ntohs(ip->id); |
e9117e50 | 2288 | } else { |
0ce8df66 | 2289 | ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len); |
e9117e50 BK |
2290 | } |
2291 | ||
e9117e50 BK |
2292 | seqnum = ntohl(tcp->seq); |
2293 | ||
2294 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); | |
2295 | ||
2296 | buffer->flags = EFX_TX_BUF_OPTION; | |
2297 | buffer->len = 0; | |
2298 | buffer->unmap_len = 0; | |
2299 | EFX_POPULATE_QWORD_5(buffer->option, | |
2300 | ESF_DZ_TX_DESC_IS_OPT, 1, | |
2301 | ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, | |
2302 | ESF_DZ_TX_TSO_OPTION_TYPE, | |
2303 | ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, | |
0ce8df66 | 2304 | ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id, |
e9117e50 BK |
2305 | ESF_DZ_TX_TSO_TCP_SEQNO, seqnum |
2306 | ); | |
2307 | ++tx_queue->insert_count; | |
2308 | ||
2309 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); | |
2310 | ||
2311 | buffer->flags = EFX_TX_BUF_OPTION; | |
2312 | buffer->len = 0; | |
2313 | buffer->unmap_len = 0; | |
0ce8df66 | 2314 | EFX_POPULATE_QWORD_5(buffer->option, |
e9117e50 BK |
2315 | ESF_DZ_TX_DESC_IS_OPT, 1, |
2316 | ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, | |
2317 | ESF_DZ_TX_TSO_OPTION_TYPE, | |
2318 | ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, | |
0ce8df66 | 2319 | ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id, |
e9117e50 BK |
2320 | ESF_DZ_TX_TSO_TCP_MSS, mss |
2321 | ); | |
2322 | ++tx_queue->insert_count; | |
2323 | ||
2324 | return 0; | |
2325 | } | |
2326 | ||
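The 0xff30 mentioned in the comment above is simply 0x10000 minus a 208-byte header allowance (assuming EFX_TSO2_MAX_HDRLEN is 208: 65536 - 208 = 65328 = 0xff30). A quick standalone check of the first constraint for a typical MSS:

```c
#include <assert.h>
#include <stdio.h>

#define TSO2_MAX_HDRLEN	208	/* assumed value of EFX_TSO2_MAX_HDRLEN */

int main(void)
{
	unsigned int ip_tot_len = 0x10000 - TSO2_MAX_HDRLEN;	/* 0xff30 */
	unsigned int mss = 1448, tcp_hdr_len = 20;

	/* >= the largest per-segment total length ... */
	assert(ip_tot_len >= mss + TSO2_MAX_HDRLEN + tcp_hdr_len);
	/* ... and < 0x10000 by construction */
	printf("ip->tot_len placeholder = %#x\n", ip_tot_len);
	return 0;
}
```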
46d1efd8 EC |
2327 | static u32 efx_ef10_tso_versions(struct efx_nic *efx) |
2328 | { | |
2329 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2330 | u32 tso_versions = 0; | |
2331 | ||
2332 | if (nic_data->datapath_caps & | |
2333 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) | |
2334 | tso_versions |= BIT(1); | |
2335 | if (nic_data->datapath_caps2 & | |
2336 | (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) | |
2337 | tso_versions |= BIT(2); | |
2338 | return tso_versions; | |
2339 | } | |
2340 | ||
8127d661 BH |
2341 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) |
2342 | { | |
044588b9 | 2343 | bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; |
85d43fdb | 2344 | bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM; |
8127d661 BH |
2345 | struct efx_channel *channel = tx_queue->channel; |
2346 | struct efx_nic *efx = tx_queue->efx; | |
8ee4c907 | 2347 | struct efx_ef10_nic_data *nic_data; |
8127d661 BH |
2348 | efx_qword_t *txd; |
2349 | int rc; | |
8ee4c907 AM |
2350 | |
2351 | nic_data = efx->nic_data; | |
8127d661 | 2352 | |
50663fe1 MH |
2353 | /* Only attempt to enable TX timestamping if we have the license for it; | |
2354 | * otherwise TXQ init will fail. | |
2355 | */ | |
2356 | if (!(nic_data->licensed_features & | |
6aa47c87 | 2357 | (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { |
50663fe1 | 2358 | tx_queue->timestamping = false; |
6aa47c87 MH |
2359 | /* Disable sync events on this channel. */ |
2360 | if (efx->type->ptp_set_ts_sync_events) | |
2361 | efx->type->ptp_set_ts_sync_events(efx, false, false); | |
2362 | } | |
50663fe1 | 2363 | |
e9117e50 BK |
2364 | /* TSOv2 is a limited resource that can only be configured on a limited |
2365 | * number of queues. TSO without checksum offload is not really a thing, | |
2366 | * so we only enable it for those queues. | |
3990a8ff CM |
2367 | * TSOv2 cannot be used with hardware timestamping, and is never needed | |
2368 | * for XDP tx. | |
e9117e50 | 2369 | */ |
1679c72c EC |
2370 | if (efx_has_cap(efx, TX_TSO_V2)) { |
2371 | if ((csum_offload || inner_csum) && | |
2372 | !tx_queue->timestamping && !tx_queue->xdp_tx) { | |
2373 | tx_queue->tso_version = 2; | |
2374 | netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", | |
2375 | channel->channel); | |
2376 | } | |
2377 | } else if (efx_has_cap(efx, TX_TSO)) { | |
2378 | tx_queue->tso_version = 1; | |
e9117e50 BK |
2379 | } |
2380 | ||
1679c72c | 2381 | rc = efx_mcdi_tx_init(tx_queue); |
8ee4c907 AM |
2382 | if (rc) |
2383 | goto fail; | |
8127d661 BH |
2384 | |
2385 | /* A previous user of this TX queue might have set us up the | |
2386 | * bomb by writing a descriptor to the TX push collector but | |
2387 | * not the doorbell. (Each collector belongs to a port, not a | |
2388 | * queue or function, so cannot easily be reset.) We must | |
2389 | * attempt to push a no-op descriptor in its place. | |
2390 | */ | |
2391 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; | |
2392 | tx_queue->insert_count = 1; | |
2393 | txd = efx_tx_desc(tx_queue, 0); | |
85d43fdb | 2394 | EFX_POPULATE_QWORD_7(*txd, |
8127d661 BH |
2395 | ESF_DZ_TX_DESC_IS_OPT, true, |
2396 | ESF_DZ_TX_OPTION_TYPE, | |
2397 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, | |
2398 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, | |
1679c72c | 2399 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2, |
85d43fdb | 2400 | ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum, |
1679c72c | 2401 | ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2, |
b9b603d4 | 2402 | ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); |
8127d661 | 2403 | tx_queue->write_count = 1; |
93171b14 | 2404 | |
0ce8df66 EC |
2405 | if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP)) |
2406 | tx_queue->tso_encap = true; | |
2407 | ||
8127d661 BH |
2408 | wmb(); |
2409 | efx_ef10_push_tx_desc(tx_queue, txd); | |
2410 | ||
2411 | return; | |
2412 | ||
2413 | fail: | |
48ce5634 BH |
2414 | netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", |
2415 | tx_queue->queue); | |
8127d661 BH |
2416 | } |
2417 | ||
8127d661 BH |
2418 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
2419 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) | |
2420 | { | |
2421 | unsigned int write_ptr; | |
2422 | efx_dword_t reg; | |
2423 | ||
2424 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
2425 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); | |
2426 | efx_writed_page(tx_queue->efx, ®, | |
2427 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); | |
2428 | } | |
2429 | ||
e9117e50 BK |
2430 | #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff |
2431 | ||
2432 | static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, | |
2433 | dma_addr_t dma_addr, unsigned int len) | |
2434 | { | |
2435 | if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { | |
2436 | /* If we need to break across multiple descriptors we should | |
2437 | * stop at a page boundary. This assumes the length limit is | |
2438 | * greater than the page size. | |
2439 | */ | |
2440 | dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; | |
2441 | ||
2442 | BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); | |
2443 | len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; | |
2444 | } | |
2445 | ||
2446 | return len; | |
2447 | } | |
2448 | ||
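Worked example of the split above: the candidate end pointer is rounded down to a page boundary so the first descriptor finishes exactly on a page edge (EFX_PAGE_SIZE assumed to be 4096 here):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_DESC_LEN	0x3fff
#define PAGE_SIZE_	4096u	/* assumed EFX_PAGE_SIZE */

int main(void)
{
	uint64_t dma_addr = 0x100000a80;	/* example mapping */
	unsigned int len = 0x8000;		/* too long for one descriptor */

	if (len > MAX_DESC_LEN) {
		uint64_t end = dma_addr + MAX_DESC_LEN;

		/* stop at the last page boundary before 'end' */
		len = (end & ~(uint64_t)(PAGE_SIZE_ - 1)) - dma_addr;
	}
	printf("first descriptor covers %#x bytes\n", len);	/* 0x3580 */
	return 0;
}
```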
8127d661 BH |
2449 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) |
2450 | { | |
2451 | unsigned int old_write_count = tx_queue->write_count; | |
2452 | struct efx_tx_buffer *buffer; | |
2453 | unsigned int write_ptr; | |
2454 | efx_qword_t *txd; | |
2455 | ||
1c0544d2 | 2456 | tx_queue->xmit_pending = false; |
b2663a4f MH |
2457 | if (unlikely(tx_queue->write_count == tx_queue->insert_count)) |
2458 | return; | |
8127d661 BH |
2459 | |
2460 | do { | |
2461 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
2462 | buffer = &tx_queue->buffer[write_ptr]; | |
2463 | txd = efx_tx_desc(tx_queue, write_ptr); | |
2464 | ++tx_queue->write_count; | |
2465 | ||
2466 | /* Create TX descriptor ring entry */ | |
2467 | if (buffer->flags & EFX_TX_BUF_OPTION) { | |
2468 | *txd = buffer->option; | |
de1deff9 EC |
2469 | if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) |
2470 | /* PIO descriptor */ | |
2471 | tx_queue->packet_write_count = tx_queue->write_count; | |
8127d661 | 2472 | } else { |
de1deff9 | 2473 | tx_queue->packet_write_count = tx_queue->write_count; |
8127d661 BH |
2474 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); |
2475 | EFX_POPULATE_QWORD_3( | |
2476 | *txd, | |
2477 | ESF_DZ_TX_KER_CONT, | |
2478 | buffer->flags & EFX_TX_BUF_CONT, | |
2479 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, | |
2480 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); | |
2481 | } | |
2482 | } while (tx_queue->write_count != tx_queue->insert_count); | |
2483 | ||
2484 | wmb(); /* Ensure descriptors are written before they are fetched */ | |
2485 | ||
2486 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | |
2487 | txd = efx_tx_desc(tx_queue, | |
2488 | old_write_count & tx_queue->ptr_mask); | |
2489 | efx_ef10_push_tx_desc(tx_queue, txd); | |
2490 | ++tx_queue->pushes; | |
2491 | } else { | |
2492 | efx_ef10_notify_tx_desc(tx_queue); | |
2493 | } | |
2494 | } | |
2495 | ||
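efx_nic_may_push_tx_desc() lives in shared NIC code; the gist is that the descriptor-push fast path is only worth taking for a single new descriptor on a previously empty queue, and bulk updates just ring the doorbell. A hedged sketch of that condition (an assumption about its shape, not the exact helper):

```c
/* Sketch: push through TX_DESC_UPD only for one descriptor on an
 * otherwise-empty queue; everything else uses the plain notify.
 */
static _Bool may_push(unsigned int old_write, unsigned int new_write,
		      _Bool was_empty)
{
	return was_empty && new_write - old_write == 1;
}
```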
fd14e5fd EC |
2496 | static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx) |
2497 | { | |
2498 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2499 | unsigned int enabled, implemented; | |
2500 | bool want_workaround_26807; | |
2501 | int rc; | |
2502 | ||
2503 | rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); | |
2504 | if (rc == -ENOSYS) { | |
2505 | /* GET_WORKAROUNDS predates this workaround, so a firmware that | |
2506 | * lacks GET_WORKAROUNDS cannot provide this workaround either. | |
2507 | */ | |
2508 | nic_data->workaround_26807 = false; | |
2509 | return 0; | |
2510 | } | |
2511 | if (rc) | |
2512 | return rc; | |
2513 | want_workaround_26807 = | |
2514 | implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807; | |
2515 | nic_data->workaround_26807 = | |
2516 | !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); | |
2517 | ||
2518 | if (want_workaround_26807 && !nic_data->workaround_26807) { | |
2519 | unsigned int flags; | |
2520 | ||
2521 | rc = efx_mcdi_set_workaround(efx, | |
2522 | MC_CMD_WORKAROUND_BUG26807, | |
2523 | true, &flags); | |
2524 | if (!rc) { | |
2525 | if (flags & | |
2526 | 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { | |
2527 | netif_info(efx, drv, efx->net_dev, | |
2528 | "other functions on NIC have been reset\n"); | |
2529 | ||
2530 | /* With MCFW v4.6.x and earlier, the | |
2531 | * boot count will have incremented, | |
2532 | * so re-read the warm_boot_count | |
2533 | * value now to ensure this function | |
2534 | * doesn't think it has changed next | |
2535 | * time it checks. | |
2536 | */ | |
2537 | rc = efx_ef10_get_warm_boot_count(efx); | |
2538 | if (rc >= 0) { | |
2539 | nic_data->warm_boot_count = rc; | |
2540 | rc = 0; | |
2541 | } | |
2542 | } | |
2543 | nic_data->workaround_26807 = true; | |
2544 | } else if (rc == -EPERM) { | |
2545 | rc = 0; | |
2546 | } | |
2547 | } | |
2548 | return rc; | |
2549 | } | |
2550 | ||
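efx_mcdi_get_workarounds() reports two bitmasks: which workarounds the firmware implements and which are currently enabled; the function above wants BUG26807 enabled whenever it is implemented. A condensed sketch of that decision with illustrative values (the real bit position is in mcdi_pcol.h):

```c
#include <stdbool.h>
#include <stdio.h>

#define BUG26807	0x2	/* illustrative bit position only */

int main(void)
{
	unsigned int implemented = 0x3, enabled = 0x1;	/* example fw reply */
	bool want = implemented & BUG26807;
	bool have = enabled & BUG26807;

	if (want && !have)
		printf("would enable bug 26807 via MC_CMD_WORKAROUND\n");
	return 0;
}
```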
2551 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) | |
2552 | { | |
2553 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2554 | int rc = efx_ef10_probe_multicast_chaining(efx); | |
ed02112c | 2555 | struct efx_mcdi_filter_vlan *vlan; |
fd14e5fd EC |
2556 | |
2557 | if (rc) | |
2558 | return rc; | |
77eb4074 | 2559 | down_write(&efx->filter_sem); |
fd14e5fd EC |
2560 | rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807); |
2561 | ||
2562 | if (rc) | |
77eb4074 | 2563 | goto out_unlock; |
fd14e5fd | 2564 | |
ed02112c EC |
2565 | list_for_each_entry(vlan, &nic_data->vlan_list, list) { |
2566 | rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); | |
2567 | if (rc) | |
2568 | goto fail_add_vlan; | |
2569 | } | |
77eb4074 | 2570 | goto out_unlock; |
ed02112c EC |
2571 | |
2572 | fail_add_vlan: | |
2573 | efx_mcdi_filter_table_remove(efx); | |
77eb4074 EC |
2574 | out_unlock: |
2575 | up_write(&efx->filter_sem); | |
ed02112c | 2576 | return rc; |
fd14e5fd EC |
2577 | } |
2578 | ||
77eb4074 EC |
2579 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) |
2580 | { | |
2581 | down_write(&efx->filter_sem); | |
2582 | efx_mcdi_filter_table_remove(efx); | |
2583 | up_write(&efx->filter_sem); | |
2584 | } | |
2585 | ||
8127d661 BH |
2586 | /* This creates an entry in the RX descriptor queue */ |
2587 | static inline void | |
2588 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | |
a33a4c73 | 2589 | { |
8127d661 BH |
2590 | struct efx_rx_buffer *rx_buf; |
2591 | efx_qword_t *rxd; | |
a33a4c73 | 2592 | |
8127d661 BH |
2593 | rxd = efx_rx_desc(rx_queue, index); |
2594 | rx_buf = efx_rx_buffer(rx_queue, index); | |
2595 | EFX_POPULATE_QWORD_2(*rxd, | |
2596 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, | |
2597 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | |
a33a4c73 EC |
2598 | } |
2599 | ||
8127d661 | 2600 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) |
a33a4c73 | 2601 | { |
8127d661 BH |
2602 | struct efx_nic *efx = rx_queue->efx; |
2603 | unsigned int write_count; | |
2604 | efx_dword_t reg; | |
a33a4c73 | 2605 | |
8127d661 BH |
2606 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ |
2607 | write_count = rx_queue->added_count & ~7; | |
2608 | if (rx_queue->notified_count == write_count) | |
a33a4c73 | 2609 | return; |
8127d661 | 2610 | |
8127d661 BH |
2611 | do |
2612 | efx_ef10_build_rx_desc( | |
2613 | rx_queue, | |
2614 | rx_queue->notified_count & rx_queue->ptr_mask); | |
2615 | while (++rx_queue->notified_count != write_count); | |
dcb4123c | 2616 | |
8127d661 BH |
2617 | wmb(); |
2618 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, | |
2619 | write_count & rx_queue->ptr_mask); | |
2620 | efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, | |
2621 | efx_rx_queue_index(rx_queue)); | |
2622 | } | |
8127d661 | 2623 | } | |
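Masking with ~7 rounds added_count down to a multiple of 8, which keeps RX_DESC_WPTR aligned as the firmware requires; buffers above that boundary simply wait for the next refill. For instance:

```c
#include <stdio.h>

int main(void)
{
	unsigned int added_count = 203;
	unsigned int write_count = added_count & ~7u;	/* 200 */

	/* descriptors 200..202 are not advertised until 8 more arrive */
	printf("notify hardware up to %u\n", write_count);
	return 0;
}
```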
8127d661 | 2624 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; |
8127d661 | 2625 | |
8127d661 BH |
2626 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) |
2627 | { | |
2628 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
2629 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
2630 | efx_qword_t event; | |
8127d661 | 2631 | |
8127d661 BH |
2632 | EFX_POPULATE_QWORD_2(event, |
2633 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
2634 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); | |
8127d661 | 2635 | |
8127d661 | 2636 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
267c0157 | 2637 | |
8127d661 BH |
2638 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
2639 | * already swapped the data to little-endian order. | |
2640 | */ | |
2641 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
2642 | sizeof(efx_qword_t)); | |
a33a4c73 | 2643 | |
8127d661 BH |
2644 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, |
2645 | inbuf, sizeof(inbuf), 0, | |
2646 | efx_ef10_rx_defer_refill_complete, 0); | |
8127d661 BH |
2647 | } |
2648 | ||
8127d661 BH |
2649 | static void |
2650 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, | |
2651 | int rc, efx_dword_t *outbuf, | |
2652 | size_t outlen_actual) | |
8127d661 | 2653 | { |
8127d661 | 2654 | /* nothing to do */ |
8127d661 BH |
2655 | } |
2656 | ||
8127d661 | 2657 | static int efx_ef10_ev_init(struct efx_channel *channel) |
8127d661 | 2658 | { |
8127d661 BH |
2659 | struct efx_nic *efx = channel->efx; |
2660 | struct efx_ef10_nic_data *nic_data; | |
4438b587 | 2661 | bool use_v2, cut_thru; |
8127d661 | 2662 | |
8127d661 | 2663 | nic_data = efx->nic_data; |
4438b587 AM |
2664 | use_v2 = nic_data->datapath_caps2 & |
2665 | 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN; | |
2666 | cut_thru = !(nic_data->datapath_caps & | |
2667 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); | |
fd14e5fd | 2668 | return efx_mcdi_ev_init(channel, cut_thru, use_v2); |
267c0157 | 2669 | } |
8127d661 | 2670 | |
8127d661 BH |
2671 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, |
2672 | unsigned int rx_queue_label) | |
267c0157 | 2673 | { |
8127d661 | 2674 | struct efx_nic *efx = rx_queue->efx; |
267c0157 | 2675 | |
8127d661 BH |
2676 | netif_info(efx, hw, efx->net_dev, |
2677 | "rx event arrived on queue %d labeled as queue %u\n", | |
2678 | efx_rx_queue_index(rx_queue), rx_queue_label); | |
8127d661 | 2679 | |
8127d661 | 2680 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
267c0157 JC |
2681 | } |
2682 | ||
8127d661 BH |
2683 | static void |
2684 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, | |
2685 | unsigned int actual, unsigned int expected) | |
42356d9a | 2686 | { |
8127d661 BH |
2687 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; |
2688 | struct efx_nic *efx = rx_queue->efx; | |
42356d9a | 2689 | |
8127d661 BH |
2690 | netif_info(efx, hw, efx->net_dev, |
2691 | "dropped %d events (index=%d expected=%d)\n", | |
2692 | dropped, actual, expected); | |
42356d9a | 2693 | |
8127d661 | 2694 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
42356d9a EC |
2695 | } |
2696 | ||
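The subtract-then-mask idiom above yields the correct drop count even when the actual pointer has wrapped past the expected one, because the arithmetic is modulo the ring size. For example, with a 4096-entry ring:

```c
#include <stdio.h>

int main(void)
{
	unsigned int ptr_mask = 4095;		/* 4096-entry ring */
	unsigned int actual = 10, expected = 4090;

	/* wrapped: (10 - 4090) mod 4096 = 16 dropped events */
	printf("dropped %u\n", (actual - expected) & ptr_mask);
	return 0;
}
```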
8127d661 BH |
2697 | /* Partially received RX was aborted; clean up. */ | |
2698 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) | |
a707d188 | 2699 | { |
8127d661 | 2700 | unsigned int rx_desc_ptr; |
a707d188 | 2701 | |
8127d661 BH |
2702 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, |
2703 | "scattered RX aborted (dropping %u buffers)\n", | |
2704 | rx_queue->scatter_n); | |
a707d188 | 2705 | |
8127d661 | 2706 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
a707d188 | 2707 | |
8127d661 BH |
2708 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, |
2709 | 0, EFX_RX_PKT_DISCARD); | |
e0a65e3c | 2710 | |
8127d661 BH |
2711 | rx_queue->removed_count += rx_queue->scatter_n; |
2712 | rx_queue->scatter_n = 0; | |
2713 | rx_queue->scatter_len = 0; | |
2714 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; | |
42356d9a EC |
2715 | } |
2716 | ||
a0ee3541 JC |
2717 | static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, |
2718 | unsigned int n_packets, | |
2719 | unsigned int rx_encap_hdr, | |
2720 | unsigned int rx_l3_class, | |
2721 | unsigned int rx_l4_class, | |
2722 | const efx_qword_t *event) | |
42356d9a | 2723 | { |
a0ee3541 | 2724 | struct efx_nic *efx = channel->efx; |
6978729f | 2725 | bool handled = false; |
e0a65e3c | 2726 | |
a0ee3541 | 2727 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { |
6978729f EC |
2728 | if (!(efx->net_dev->features & NETIF_F_RXALL)) { |
2729 | if (!efx->loopback_selftest) | |
2730 | channel->n_rx_eth_crc_err += n_packets; | |
2731 | return EFX_RX_PKT_DISCARD; | |
2732 | } | |
2733 | handled = true; | |
42356d9a | 2734 | } |
a0ee3541 JC |
2735 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { |
2736 | if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && | |
2737 | rx_l3_class != ESE_DZ_L3_CLASS_IP4 && | |
2738 | rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && | |
2739 | rx_l3_class != ESE_DZ_L3_CLASS_IP6 && | |
2740 | rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) | |
2741 | netdev_WARN(efx->net_dev, | |
2742 | "invalid class for RX_IPCKSUM_ERR: event=" | |
2743 | EFX_QWORD_FMT "\n", | |
2744 | EFX_QWORD_VAL(*event)); | |
2745 | if (!efx->loopback_selftest) | |
2746 | *(rx_encap_hdr ? | |
2747 | &channel->n_rx_outer_ip_hdr_chksum_err : | |
2748 | &channel->n_rx_ip_hdr_chksum_err) += n_packets; | |
267c0157 | 2749 | return 0; |
267c0157 | 2750 | } |
a0ee3541 JC |
2751 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { |
2752 | if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && | |
2753 | ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && | |
2754 | rx_l3_class != ESE_DZ_L3_CLASS_IP6) || | |
d8d8ccf2 BK |
2755 | (rx_l4_class != ESE_FZ_L4_CLASS_TCP && |
2756 | rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) | |
a0ee3541 JC |
2757 | netdev_WARN(efx->net_dev, |
2758 | "invalid class for RX_TCPUDP_CKSUM_ERR: event=" | |
2759 | EFX_QWORD_FMT "\n", | |
2760 | EFX_QWORD_VAL(*event)); | |
2761 | if (!efx->loopback_selftest) | |
2762 | *(rx_encap_hdr ? | |
2763 | &channel->n_rx_outer_tcp_udp_chksum_err : | |
2764 | &channel->n_rx_tcp_udp_chksum_err) += n_packets; | |
267c0157 | 2765 | return 0; |
a0ee3541 JC |
2766 | } |
2767 | if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { | |
2768 | if (unlikely(!rx_encap_hdr)) | |
2769 | netdev_WARN(efx->net_dev, | |
2770 | "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" | |
2771 | EFX_QWORD_FMT "\n", | |
2772 | EFX_QWORD_VAL(*event)); | |
2773 | else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && | |
2774 | rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && | |
2775 | rx_l3_class != ESE_DZ_L3_CLASS_IP6 && | |
2776 | rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) | |
2777 | netdev_WARN(efx->net_dev, | |
2778 | "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" | |
2779 | EFX_QWORD_FMT "\n", | |
2780 | EFX_QWORD_VAL(*event)); | |
2781 | if (!efx->loopback_selftest) | |
2782 | channel->n_rx_inner_ip_hdr_chksum_err += n_packets; | |
2783 | return 0; | |
2784 | } | |
2785 | if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { | |
2786 | if (unlikely(!rx_encap_hdr)) | |
2787 | netdev_WARN(efx->net_dev, | |
2788 | "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" | |
2789 | EFX_QWORD_FMT "\n", | |
2790 | EFX_QWORD_VAL(*event)); | |
2791 | else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && | |
2792 | rx_l3_class != ESE_DZ_L3_CLASS_IP6) || | |
d8d8ccf2 BK |
2793 | (rx_l4_class != ESE_FZ_L4_CLASS_TCP && |
2794 | rx_l4_class != ESE_FZ_L4_CLASS_UDP))) | |
a0ee3541 JC |
2795 | netdev_WARN(efx->net_dev, |
2796 | "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" | |
2797 | EFX_QWORD_FMT "\n", | |
2798 | EFX_QWORD_VAL(*event)); | |
2799 | if (!efx->loopback_selftest) | |
2800 | channel->n_rx_inner_tcp_udp_chksum_err += n_packets; | |
2801 | return 0; | |
2802 | } | |
8127d661 | 2803 | |
6978729f | 2804 | WARN_ON(!handled); /* No error bits were recognised */ |
a0ee3541 | 2805 | return 0; |
8127d661 BH |
2806 | } |
2807 | ||
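A condensed sketch of the policy the handler above implements (the flattening into two booleans is an illustrative simplification): only an Ethernet CRC error can cause a discard, and only when the stack has not asked for all frames via NETIF_F_RXALL; every checksum-error case just bumps a statistic and returns 0, so the packet is delivered without a checksum-valid flag and the stack re-verifies it in software.

#include <stdbool.h>

enum rx_verdict { RX_DELIVER = 0, RX_DISCARD = 1 };

static enum rx_verdict classify_rx_error(bool crc_err, bool csum_err,
					 bool want_all_frames)
{
	if (crc_err && !want_all_frames)
		return RX_DISCARD;	/* counted in n_rx_eth_crc_err */
	if (csum_err)
		return RX_DELIVER;	/* counted, but no CSUMMED flag:
					 * the stack validates in software */
	return RX_DELIVER;
}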
8127d661 BH |
2808 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, |
2809 | const efx_qword_t *event) | |
8127d661 | 2810 | { |
a0ee3541 JC |
2811 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; |
2812 | unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; | |
8127d661 BH |
2813 | unsigned int n_descs, n_packets, i; |
2814 | struct efx_nic *efx = channel->efx; | |
45b2449e | 2815 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
8127d661 | 2816 | struct efx_rx_queue *rx_queue; |
a0ee3541 | 2817 | efx_qword_t errors; |
8127d661 BH |
2818 | bool rx_cont; |
2819 | u16 flags = 0; | |
8127d661 | 2820 | |
6aa7de05 | 2821 | if (unlikely(READ_ONCE(efx->reset_pending))) |
8127d661 | 2822 | return 0; |
8127d661 | 2823 | |
8127d661 BH |
2824 | /* Basic packet information */ |
2825 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); | |
2826 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); | |
2827 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); | |
a0ee3541 | 2828 | rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); |
d8d8ccf2 | 2829 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); |
8127d661 | 2830 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); |
a0ee3541 JC |
2831 | rx_encap_hdr = |
2832 | nic_data->datapath_caps & | |
2833 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? | |
2834 | EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : | |
2835 | ESE_EZ_ENCAP_HDR_NONE; | |
8127d661 | 2836 | |
48ce5634 BH |
2837 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) |
2838 | netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" | |
2839 | EFX_QWORD_FMT "\n", | |
2840 | EFX_QWORD_VAL(*event)); | |
8127d661 | 2841 | |
8127d661 | 2842 | rx_queue = efx_channel_get_rx_queue(channel); |
8127d661 | 2843 | |
8127d661 BH |
2844 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) |
2845 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); | |
8127d661 | 2846 | |
8127d661 BH |
2847 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & |
2848 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
8127d661 | 2849 | |
8127d661 | 2850 | if (n_descs != rx_queue->scatter_n + 1) { |
8127d661 BH |
2853 | /* detect rx abort */ |
2854 | if (unlikely(n_descs == rx_queue->scatter_n)) { | |
48ce5634 BH |
2855 | if (rx_queue->scatter_n == 0 || rx_bytes != 0) |
2856 | netdev_WARN(efx->net_dev, | |
2857 | "invalid RX abort: scatter_n=%u event=" | |
2858 | EFX_QWORD_FMT "\n", | |
2859 | rx_queue->scatter_n, | |
2860 | EFX_QWORD_VAL(*event)); | |
8127d661 BH |
2861 | efx_ef10_handle_rx_abort(rx_queue); |
2862 | return 0; | |
2863 | } | |
8127d661 | 2864 | |
92a04168 BH |
2865 | /* Check that RX completion merging is valid, i.e. |
2866 | * the current firmware supports it and this is a | |
2867 | * non-scattered packet. | |
2868 | */ | |
2869 | if (!(nic_data->datapath_caps & | |
2870 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || | |
2871 | rx_queue->scatter_n != 0 || rx_cont) { | |
8127d661 BH |
2872 | efx_ef10_handle_rx_bad_lbits( |
2873 | rx_queue, next_ptr_lbits, | |
2874 | (rx_queue->removed_count + | |
2875 | rx_queue->scatter_n + 1) & | |
2876 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
2877 | return 0; | |
2878 | } | |
8127d661 | 2879 | |
8127d661 BH |
2880 | /* Merged completion for multiple non-scattered packets */ |
2881 | rx_queue->scatter_n = 1; | |
2882 | rx_queue->scatter_len = 0; | |
2883 | n_packets = n_descs; | |
2884 | ++channel->n_rx_merge_events; | |
2885 | channel->n_rx_merge_packets += n_packets; | |
2886 | flags |= EFX_RX_PKT_PREFIX_LEN; | |
8127d661 | 2887 | } else { |
8127d661 BH |
2888 | ++rx_queue->scatter_n; |
2889 | rx_queue->scatter_len += rx_bytes; | |
2890 | if (rx_cont) | |
2891 | return 0; | |
2892 | n_packets = 1; | |
8127d661 | 2893 | } |
7665d1ab | 2894 | |
a0ee3541 JC |
2895 | EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, |
2896 | ESF_DZ_RX_IPCKSUM_ERR, 1, | |
2897 | ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, | |
2898 | ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, | |
2899 | ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); | |
2900 | EFX_AND_QWORD(errors, *event, errors); | |
2901 | if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { | |
2902 | flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, | |
90d2ea9f | 2903 | rx_encap_hdr, |
a0ee3541 | 2904 | rx_l3_class, rx_l4_class, |
90d2ea9f | 2905 | event); |
8127d661 | 2906 | } else { |
d8d8ccf2 BK |
2907 | bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || |
2908 | rx_l4_class == ESE_FZ_L4_CLASS_UDP; | |
8127d661 | 2909 | |
da50ae2e JC |
2910 | switch (rx_encap_hdr) { |
2911 | case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ | |
2912 | flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ | |
2913 | if (tcpudp) | |
2914 | flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ | |
2915 | break; | |
2916 | case ESE_EZ_ENCAP_HDR_GRE: | |
2917 | case ESE_EZ_ENCAP_HDR_NONE: | |
2918 | if (tcpudp) | |
2919 | flags |= EFX_RX_PKT_CSUMMED; | |
2920 | break; | |
2921 | default: | |
2922 | netdev_WARN(efx->net_dev, | |
2923 | "unknown encapsulation type: event=" | |
2924 | EFX_QWORD_FMT "\n", | |
2925 | EFX_QWORD_VAL(*event)); | |
2926 | } | |
8127d661 | 2927 | } |
8127d661 | 2928 | |
d8d8ccf2 | 2929 | if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) |
8127d661 | 2930 | flags |= EFX_RX_PKT_TCP; |
8127d661 | 2931 | |
8127d661 | 2932 | channel->irq_mod_score += 2 * n_packets; |
8127d661 | 2933 | |
8127d661 BH |
2934 | /* Handle received packet(s) */ |
2935 | for (i = 0; i < n_packets; i++) { | |
2936 | efx_rx_packet(rx_queue, | |
2937 | rx_queue->removed_count & rx_queue->ptr_mask, | |
2938 | rx_queue->scatter_n, rx_queue->scatter_len, | |
2939 | flags); | |
2940 | rx_queue->removed_count += rx_queue->scatter_n; | |
8127d661 | 2941 | } |
8127d661 | 2942 | |
8127d661 BH |
2943 | rx_queue->scatter_n = 0; |
2944 | rx_queue->scatter_len = 0; | |
8127d661 | 2945 | |
8127d661 | 2946 | return n_packets; |
8127d661 BH |
2947 | } |
2948 | ||
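The descriptor count above is reconstructed from only the low bits of the hardware pointer that fit in the event. A sketch, with the field width an assumption for illustration (the driver uses ESF_DZ_RX_DSC_PTR_LBITS_WIDTH):

#define LBITS_WIDTH 4	/* assumed width, illustration only */

static unsigned int rx_event_n_descs(unsigned int next_ptr_lbits,
				     unsigned int removed_count)
{
	/* Only LBITS_WIDTH bits of the pointer are present in the
	 * event, so the difference is taken modulo 2^LBITS_WIDTH.
	 */
	return (next_ptr_lbits - removed_count) & ((1 << LBITS_WIDTH) - 1);
}

/* The result is then classified:
 *   n_descs == scatter_n + 1  normal: one more fragment of this packet
 *   n_descs == scatter_n      RX abort: drop the partial packet
 *   anything else             a merged completion for n_descs packets,
 *                             legal only if the firmware advertises RX
 *                             batching and no scatter is in progress;
 *                             otherwise events were lost and the NIC
 *                             is reset via the bad_lbits path
 */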
b9b603d4 | 2949 | static u32 efx_ef10_extract_event_ts(efx_qword_t *event) |
8127d661 | 2950 | { |
b9b603d4 | 2951 | u32 tstamp; |
8127d661 | 2952 | |
b9b603d4 MH |
2953 | tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); |
2954 | tstamp <<= 16; | |
2955 | tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); | |
8127d661 | 2956 | |
b9b603d4 | 2957 | return tstamp; |
8127d661 BH |
2958 | } |
2959 | ||
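The extraction above stitches the 32-bit timestamp word back together from two 16-bit event fields; a standalone equivalent with a worked value:

#include <stdint.h>
#include <stdio.h>

static uint32_t extract_ts(uint16_t hi, uint16_t lo)
{
	return ((uint32_t)hi << 16) | lo;	/* DATA_HI:DATA_LO */
}

int main(void)
{
	printf("0x%08x\n", extract_ts(0x1234, 0xabcd)); /* 0x1234abcd */
	return 0;
}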
4aaf2c52 | 2960 | static int |
8127d661 | 2961 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) |
34813fe2 | 2962 | { |
8127d661 BH |
2963 | struct efx_nic *efx = channel->efx; |
2964 | struct efx_tx_queue *tx_queue; | |
2965 | unsigned int tx_ev_desc_ptr; | |
2966 | unsigned int tx_ev_q_label; | |
b9b603d4 | 2967 | unsigned int tx_ev_type; |
4aaf2c52 | 2968 | int work_done; |
b9b603d4 | 2969 | u64 ts_part; |
34813fe2 | 2970 | |
6aa7de05 | 2971 | if (unlikely(READ_ONCE(efx->reset_pending))) |
4aaf2c52 | 2972 | return 0; |
34813fe2 | 2973 | |
8127d661 | 2974 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
4aaf2c52 | 2975 | return 0; |
34813fe2 | 2976 | |
b9b603d4 | 2977 | /* Get the transmit queue */ |
8127d661 | 2978 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); |
172e269e | 2979 | tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); |
34813fe2 | 2980 | |
b9b603d4 MH |
2981 | if (!tx_queue->timestamping) { |
2982 | /* Transmit completion */ | |
2983 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); | |
4aaf2c52 | 2984 | return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); |
7ac0dd9d AR |
2985 | } |
2986 | ||
b9b603d4 | 2987 | /* Transmit timestamps are only available for 8XXX series. They result |
3b4f06c7 TZ |
2988 | * in up to three events per packet. These occur in order, and are: |
2989 | * - the normal completion event (may be omitted) | |
b9b603d4 MH |
2990 | * - the low part of the timestamp |
2991 | * - the high part of the timestamp | |
2992 | * | |
3b4f06c7 TZ |
2993 | * It's possible for multiple completion events to appear before the |
2994 | * corresponding timestamps. So we can for example get: | |
2995 | * COMP N | |
2996 | * COMP N+1 | |
2997 | * TS_LO N | |
2998 | * TS_HI N | |
2999 | * TS_LO N+1 | |
3000 | * TS_HI N+1 | |
3001 | * | |
3002 | * In addition it's also possible for the adjacent completions to be | |
3003 | * merged, so we may not see COMP N above. As such, the completion | |
3004 | * events are not very useful here. | |
3005 | * | |
b9b603d4 MH |
3006 | * Each part of the timestamp is itself split across two 16-bit | |
3007 | * fields in the event. | |
3008 | */ | |
3009 | tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); | |
4aaf2c52 | 3010 | work_done = 0; |
8127d661 | 3011 | |
b9b603d4 MH |
3012 | switch (tx_ev_type) { |
3013 | case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: | |
3b4f06c7 | 3014 | /* Ignore this event - see above. */ |
b9b603d4 | 3015 | break; |
9b410801 | 3016 | |
b9b603d4 MH |
3017 | case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: |
3018 | ts_part = efx_ef10_extract_event_ts(event); | |
3019 | tx_queue->completed_timestamp_minor = ts_part; | |
3020 | break; | |
9b410801 | 3021 | |
b9b603d4 MH |
3022 | case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: |
3023 | ts_part = efx_ef10_extract_event_ts(event); | |
3024 | tx_queue->completed_timestamp_major = ts_part; | |
9b410801 | 3025 | |
3b4f06c7 | 3026 | efx_xmit_done_single(tx_queue); |
4aaf2c52 | 3027 | work_done = 1; |
b9b603d4 | 3028 | break; |
9b410801 | 3029 | |
b9b603d4 MH |
3030 | default: |
3031 | netif_err(efx, hw, efx->net_dev, | |
3032 | "channel %d unknown tx event type %d (data " | |
3033 | EFX_QWORD_FMT ")\n", | |
3034 | channel->channel, tx_ev_type, | |
3035 | EFX_QWORD_VAL(*event)); | |
3036 | break; | |
e4478ad1 | 3037 | } |
4aaf2c52 ÍH |
3038 | |
3039 | return work_done; | |
8127d661 | 3040 | } |
e4478ad1 | 3041 | |
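Because the completion event may be merged away, the handler above keys packet completion off TS_HI alone, with TS_LO latched beforehand. A sketch of that per-queue state machine (the enum values are placeholders, not the real TX_TIMESTAMP_EVENT_* codes):

#include <stdbool.h>
#include <stdint.h>

enum tx_ev { EV_COMPLETION, EV_TSTAMP_LO, EV_TSTAMP_HI };

struct ts_state {
	uint32_t minor;		/* latched low half */
	uint32_t major;		/* high half always arrives last */
};

/* Returns true when one packet's timestamp is complete. */
static bool tx_ts_event(struct ts_state *s, enum tx_ev type, uint32_t part)
{
	switch (type) {
	case EV_COMPLETION:	/* may be merged or omitted: ignore */
		return false;
	case EV_TSTAMP_LO:	/* always precedes its TSTAMP_HI */
		s->minor = part;
		return false;
	case EV_TSTAMP_HI:	/* completes exactly one packet */
		s->major = part;
		return true;
	}
	return false;
}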
8127d661 BH |
3042 | static void |
3043 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |
3044 | { | |
3045 | struct efx_nic *efx = channel->efx; | |
3046 | int subcode; | |
8127d661 | 3047 | |
8127d661 | 3048 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); |
12fb0da4 | 3049 | |
8127d661 BH |
3050 | switch (subcode) { |
3051 | case ESE_DZ_DRV_TIMER_EV: | |
3052 | case ESE_DZ_DRV_WAKE_UP_EV: | |
3053 | break; | |
3054 | case ESE_DZ_DRV_START_UP_EV: | |
3055 | /* Event queue init complete: nothing to do. */ | |
3056 | break; | |
3057 | default: | |
3058 | netif_err(efx, hw, efx->net_dev, | |
3059 | "channel %d unknown driver event type %d" | |
3060 | " (data " EFX_QWORD_FMT ")\n", | |
3061 | channel->channel, subcode, | |
3062 | EFX_QWORD_VAL(*event)); | |
34813fe2 | 3063 | |
34813fe2 | 3064 | } |
8127d661 BH |
3065 | } |
3066 | ||
8127d661 BH |
3067 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, |
3068 | efx_qword_t *event) | |
8127d661 | 3069 | { |
8127d661 BH |
3070 | struct efx_nic *efx = channel->efx; |
3071 | u32 subcode; | |
8127d661 | 3072 | |
8127d661 | 3073 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); |
8127d661 | 3074 | |
8127d661 BH |
3075 | switch (subcode) { |
3076 | case EFX_EF10_TEST: | |
3077 | channel->event_test_cpu = raw_smp_processor_id(); | |
3078 | break; | |
3079 | case EFX_EF10_REFILL: | |
3080 | /* The queue must be empty, so we won't receive any rx | |
3081 | * events, so efx_process_channel() won't refill the | |
3082 | * queue. Refill it here. | |
3083 | */ | |
cce28794 | 3084 | efx_fast_push_rx_descriptors(&channel->rx_queue, true); |
8127d661 BH |
3085 | break; |
3086 | default: | |
3087 | netif_err(efx, hw, efx->net_dev, | |
3088 | "channel %d unknown driver event type %u" | |
3089 | " (data " EFX_QWORD_FMT ")\n", | |
3090 | channel->channel, (unsigned) subcode, | |
3091 | EFX_QWORD_VAL(*event)); | |
3092 | } | |
3093 | } | |
2d3d4ec0 | 3094 | |
4aaf2c52 ÍH |
3095 | #define EFX_NAPI_MAX_TX 512 |
3096 | ||
8127d661 BH |
3097 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) |
3098 | { | |
3099 | struct efx_nic *efx = channel->efx; | |
3100 | efx_qword_t event, *p_event; | |
3101 | unsigned int read_ptr; | |
4aaf2c52 | 3102 | int spent_tx = 0; |
8127d661 | 3103 | int spent = 0; |
4aaf2c52 | 3104 | int ev_code; |
2d3d4ec0 | 3105 | |
75363a46 EB |
3106 | if (quota <= 0) |
3107 | return spent; | |
8127d661 | 3108 | |
8127d661 | 3109 | read_ptr = channel->eventq_read_ptr; |
8127d661 | 3110 | |
8127d661 BH |
3111 | for (;;) { |
3112 | p_event = efx_event(channel, read_ptr); | |
3113 | event = *p_event; | |
2d3d4ec0 | 3114 | |
8127d661 BH |
3115 | if (!efx_event_present(&event)) |
3116 | break; | |
8127d661 | 3117 | |
8127d661 | 3118 | EFX_SET_QWORD(*p_event); |
8127d661 | 3119 | |
8127d661 | 3120 | ++read_ptr; |
dd98708c | 3121 | |
8127d661 | 3122 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); |
0d322413 | 3123 | |
8127d661 BH |
3124 | netif_vdbg(efx, drv, efx->net_dev, |
3125 | "processing event on %d " EFX_QWORD_FMT "\n", | |
3126 | channel->channel, EFX_QWORD_VAL(event)); | |
8127d661 | 3127 | |
8127d661 BH |
3128 | switch (ev_code) { |
3129 | case ESE_DZ_EV_CODE_MCDI_EV: | |
3130 | efx_mcdi_process_event(channel, &event); | |
3131 | break; | |
3132 | case ESE_DZ_EV_CODE_RX_EV: | |
3133 | spent += efx_ef10_handle_rx_event(channel, &event); | |
3134 | if (spent >= quota) { | |
3135 | /* XXX can we split a merged event to | |
3136 | * avoid going over-quota? | |
3137 | */ | |
3138 | spent = quota; | |
3139 | goto out; | |
3140 | } | |
3141 | break; | |
3142 | case ESE_DZ_EV_CODE_TX_EV: | |
4aaf2c52 ÍH |
3143 | spent_tx += efx_ef10_handle_tx_event(channel, &event); |
3144 | if (spent_tx >= EFX_NAPI_MAX_TX) { | |
3145 | spent = quota; | |
3146 | goto out; | |
3147 | } | |
8127d661 BH |
3148 | break; |
3149 | case ESE_DZ_EV_CODE_DRIVER_EV: | |
3150 | efx_ef10_handle_driver_event(channel, &event); | |
3151 | if (++spent == quota) | |
3152 | goto out; | |
3153 | break; | |
3154 | case EFX_EF10_DRVGEN_EV: | |
3155 | efx_ef10_handle_driver_generated_event(channel, &event); | |
3156 | break; | |
3157 | default: | |
3158 | netif_err(efx, hw, efx->net_dev, | |
3159 | "channel %d unknown event type %d" | |
3160 | " (data " EFX_QWORD_FMT ")\n", | |
3161 | channel->channel, ev_code, | |
3162 | EFX_QWORD_VAL(event)); | |
3163 | } | |
8127d661 BH |
3164 | } |
3165 | ||
8127d661 BH |
3166 | out: |
3167 | channel->eventq_read_ptr = read_ptr; | |
3168 | return spent; | |
8127d661 BH |
3169 | } |
3170 | ||
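A reduced model of the budget accounting in the loop above (the event source and handlers are stubs, not driver API): RX work counts directly against the NAPI quota and is clamped to it, while TX completions accumulate separately and, once past EFX_NAPI_MAX_TX, the poller reports the full quota as spent so NAPI schedules it again promptly.

enum { EV_NONE = -1, EV_RX = 0, EV_TX = 1 };

/* Stubbed event source and handlers, purely for illustration. */
static int next_event(void) { return EV_NONE; }
static int handle_rx(void) { return 1; }
static int handle_tx(void) { return 1; }

#define NAPI_MAX_TX 512		/* mirrors EFX_NAPI_MAX_TX */

static int poll_events(int quota)
{
	int spent = 0, spent_tx = 0, ev;

	while ((ev = next_event()) >= 0) {
		if (ev == EV_RX) {
			spent += handle_rx();	/* may be >1: merged event */
			if (spent >= quota)
				return quota;	/* clamp, never over-report */
		} else if (ev == EV_TX) {
			spent_tx += handle_tx();
			if (spent_tx >= NAPI_MAX_TX)
				return quota;	/* yield, repoll soon */
		}
	}
	return spent;
}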
8127d661 | 3171 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) |
6a37958b | 3172 | { |
8127d661 BH |
3173 | struct efx_nic *efx = channel->efx; |
3174 | efx_dword_t rptr; | |
6a37958b | 3175 | |
8127d661 BH |
3176 | if (EFX_EF10_WORKAROUND_35388(efx)) { |
3177 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < | |
3178 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
3179 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > | |
3180 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
c2bebe37 | 3181 | |
8127d661 BH |
3182 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, |
3183 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, | |
3184 | ERF_DD_EVQ_IND_RPTR, | |
3185 | (channel->eventq_read_ptr & | |
3186 | channel->eventq_mask) >> | |
3187 | ERF_DD_EVQ_IND_RPTR_WIDTH); | |
3188 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
3189 | channel->channel); | |
3190 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
3191 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, | |
3192 | ERF_DD_EVQ_IND_RPTR, | |
3193 | channel->eventq_read_ptr & | |
3194 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); | |
3195 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
3196 | channel->channel); | |
3197 | } else { | |
3198 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, | |
3199 | channel->eventq_read_ptr & | |
3200 | channel->eventq_mask); | |
3201 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); | |
e65a5109 | 3202 | } |
6a37958b AR |
3203 | } |
3204 | ||
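Under workaround 35388 the read pointer is too wide for a single doorbell write, so it is posted in two halves with the high bits first. A sketch with an assumed field width (the driver's is ERF_DD_EVQ_IND_RPTR_WIDTH) and a stand-in for the MMIO write:

#include <stdio.h>

#define RPTR_WIDTH 5	/* assumed width, illustration only */

/* Stand-in for the efx_writed_page() doorbell write. */
static void write_doorbell(const char *half, unsigned int value)
{
	printf("%s <- %#x\n", half, value);
}

static void ack_evq(unsigned int read_ptr, unsigned int evq_mask)
{
	unsigned int masked = read_ptr & evq_mask;

	/* High half first; the low write completes the acknowledgement. */
	write_doorbell("HIGH", masked >> RPTR_WIDTH);
	write_doorbell("LOW", masked & ((1u << RPTR_WIDTH) - 1));
}

int main(void)
{
	ack_evq(0x2a7, 0x3ff);	/* 1024-entry queue: HIGH=0x15, LOW=0x7 */
	return 0;
}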
8127d661 | 3205 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) |
8127d661 | 3206 | { |
8127d661 BH |
3207 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
3208 | struct efx_nic *efx = channel->efx; | |
3209 | efx_qword_t event; | |
3210 | int rc; | |
8127d661 | 3211 | |
8127d661 BH |
3212 | EFX_POPULATE_QWORD_2(event, |
3213 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
3214 | ESF_DZ_EV_DATA, EFX_EF10_TEST); | |
b3a3c03c | 3215 | |
8127d661 | 3216 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
b3a3c03c | 3217 | |
8127d661 BH |
3218 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
3219 | * already swapped the data to little-endian order. | |
3220 | */ | |
3221 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
3222 | sizeof(efx_qword_t)); | |
822b96f8 | 3223 | |
8127d661 BH |
3224 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), |
3225 | NULL, 0, NULL); | |
3226 | if (rc != 0) | |
3227 | goto fail; | |
8127d661 | 3228 | |
8127d661 | 3229 | return; |
c70d6815 | 3230 | |
8127d661 BH |
3231 | fail: |
3232 | WARN_ON(true); | |
3233 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
822b96f8 DP |
3234 | } |
3235 | ||
e283546c | 3236 | static void efx_ef10_prepare_flr(struct efx_nic *efx) |
822b96f8 | 3237 | { |
e283546c | 3238 | atomic_set(&efx->active_queues, 0); |
8127d661 BH |
3239 | } |
3240 | ||
7a186f47 DP |
3241 | static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) |
3242 | { | |
3243 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
3244 | u8 mac_old[ETH_ALEN]; | |
3245 | int rc, rc2; | |
3246 | ||
3247 | /* Only reconfigure a PF-created vport */ | |
3248 | if (is_zero_ether_addr(nic_data->vport_mac)) | |
3249 | return 0; | |
3250 | ||
3251 | efx_device_detach_sync(efx); | |
3252 | efx_net_stop(efx->net_dev); | |
77eb4074 | 3253 | efx_ef10_filter_table_remove(efx); |
7a186f47 | 3254 | |
dfcabb07 | 3255 | rc = efx_ef10_vadaptor_free(efx, efx->vport_id); |
7a186f47 DP |
3256 | if (rc) |
3257 | goto restore_filters; | |
3258 | ||
3259 | ether_addr_copy(mac_old, nic_data->vport_mac); | |
dfcabb07 | 3260 | rc = efx_ef10_vport_del_mac(efx, efx->vport_id, |
7a186f47 DP |
3261 | nic_data->vport_mac); |
3262 | if (rc) | |
3263 | goto restore_vadaptor; | |
3264 | ||
dfcabb07 | 3265 | rc = efx_ef10_vport_add_mac(efx, efx->vport_id, |
7a186f47 DP |
3266 | efx->net_dev->dev_addr); |
3267 | if (!rc) { | |
3268 | ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); | |
3269 | } else { | |
dfcabb07 | 3270 | rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old); |
7a186f47 DP |
3271 | if (rc2) { |
3272 | /* Failed to add original MAC, so clear vport_mac */ | |
3273 | eth_zero_addr(nic_data->vport_mac); | |
3274 | goto reset_nic; | |
3275 | } | |
3276 | } | |
3277 | ||
3278 | restore_vadaptor: | |
dfcabb07 | 3279 | rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id); |
7a186f47 DP |
3280 | if (rc2) |
3281 | goto reset_nic; | |
3282 | restore_filters: | |
fd14e5fd | 3283 | rc2 = efx_ef10_filter_table_probe(efx); |
7a186f47 DP |
3284 | if (rc2) |
3285 | goto reset_nic; | |
3286 | ||
3287 | rc2 = efx_net_open(efx->net_dev); | |
3288 | if (rc2) | |
3289 | goto reset_nic; | |
3290 | ||
9c568fd8 | 3291 | efx_device_attach_if_not_resetting(efx); |
7a186f47 DP |
3292 | |
3293 | return rc; | |
3294 | ||
3295 | reset_nic: | |
3296 | netif_err(efx, drv, efx->net_dev, | |
3297 | "Failed to restore when changing MAC address - scheduling reset\n"); | |
3298 | efx_schedule_reset(efx, RESET_TYPE_DATAPATH); | |
3299 | ||
3300 | return rc ? rc : rc2; | |
3301 | } | |
3302 | ||
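The goto ladder above is the usual kernel unwind shape: every restore step runs whether or not the MAC swap succeeded, and a failure while restoring escalates to a datapath reset. In outline (comments only, not code from the driver):

/* detach device, stop net_dev, remove filter table
 * free vadaptor          - on failure: restore filters, reopen, return
 * delete old vport MAC   - on failure: realloc vadaptor, then as above
 * add new vport MAC      - on failure: try to re-add the old MAC;
 *                          if even that fails, clear vport_mac and
 *                          schedule RESET_TYPE_DATAPATH
 * realloc vadaptor, re-probe filters, reopen, reattach
 */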
910c8789 SS |
3303 | static int efx_ef10_set_mac_address(struct efx_nic *efx) |
3304 | { | |
3305 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); | |
910c8789 SS |
3306 | bool was_enabled = efx->port_enabled; |
3307 | int rc; | |
3308 | ||
a8aed7b3 JC |
3309 | #ifdef CONFIG_SFC_SRIOV |
3310 | /* If this function is a VF and we have access to the parent PF, | |
3311 | * then use the PF control path to attempt to change the VF MAC address. | |
3312 | */ | |
3313 | if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { | |
3314 | struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); | |
3315 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
3316 | u8 mac[ETH_ALEN]; | |
3317 | ||
3318 | /* net_dev->dev_addr can be zeroed by efx_net_stop in | |
3319 | * efx_ef10_sriov_set_vf_mac, so pass in a copy. | |
3320 | */ | |
3321 | ether_addr_copy(mac, efx->net_dev->dev_addr); | |
3322 | ||
3323 | rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); | |
3324 | if (!rc) | |
3325 | return 0; | |
3326 | ||
3327 | netif_dbg(efx, drv, efx->net_dev, | |
3328 | "Updating VF mac via PF failed (%d), setting directly\n", | |
3329 | rc); | |
3330 | } | |
3331 | #endif | |
3332 | ||
910c8789 SS |
3333 | efx_device_detach_sync(efx); |
3334 | efx_net_stop(efx->net_dev); | |
d248953a MH |
3335 | |
3336 | mutex_lock(&efx->mac_lock); | |
77eb4074 | 3337 | efx_ef10_filter_table_remove(efx); |
910c8789 SS |
3338 | |
3339 | ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), | |
3340 | efx->net_dev->dev_addr); | |
3341 | MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, | |
dfcabb07 | 3342 | efx->vport_id); |
535a6177 DP |
3343 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, |
3344 | sizeof(inbuf), NULL, 0, NULL); | |
910c8789 | 3345 | |
fd14e5fd | 3346 | efx_ef10_filter_table_probe(efx); |
d248953a MH |
3347 | mutex_unlock(&efx->mac_lock); |
3348 | ||
910c8789 SS |
3349 | if (was_enabled) |
3350 | efx_net_open(efx->net_dev); | |
9c568fd8 | 3351 | efx_device_attach_if_not_resetting(efx); |
910c8789 | 3352 | |
9e9f665a DP |
3353 | if (rc == -EPERM) { |
3354 | netif_err(efx, drv, efx->net_dev, | |
3355 | "Cannot change MAC address; use sfboot to enable" | |
3356 | " mac-spoofing on this interface\n"); | |
7a186f47 DP |
3357 | } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { |
3358 | /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC | |
3359 | * fall-back to the method of changing the MAC address on the | |
3360 | * vport. This only applies to PFs because such versions of | |
3361 | * MCFW do not support VFs. | |
3362 | */ | |
3363 | rc = efx_ef10_vport_set_mac_address(efx); | |
cbad52e9 | 3364 | } else if (rc) { |
535a6177 DP |
3365 | efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, |
3366 | sizeof(inbuf), NULL, 0, rc); | |
9e9f665a DP |
3367 | } |
3368 | ||
910c8789 SS |
3369 | return rc; |
3370 | } | |
3371 | ||
af3c38d3 | 3372 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only) |
8127d661 | 3373 | { |
af3c38d3 | 3374 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
8127d661 | 3375 | |
90c914d2 | 3376 | efx_mcdi_filter_sync_rx_mode(efx); |
862f894c | 3377 | |
af3c38d3 EC |
3378 | if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED)) |
3379 | return efx_mcdi_set_mtu(efx); | |
3380 | return efx_mcdi_set_mac(efx); | |
862f894c SS |
3381 | } |
3382 | ||
74cd60a4 JC |
3383 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) |
3384 | { | |
3385 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); | |
3386 | ||
3387 | MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); | |
3388 | return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), | |
3389 | NULL, 0, NULL); | |
3390 | } | |
3391 | ||
3392 | /* MC BISTs follow a different poll mechanism to phy BISTs. | |
3393 | * The BIST is done in the poll handler on the MC, and the MCDI command | |
3394 | * will block until the BIST is done. | |
3395 | */ | |
3396 | static int efx_ef10_poll_bist(struct efx_nic *efx) | |
3397 | { | |
3398 | int rc; | |
3399 | MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); | |
3400 | size_t outlen; | |
3401 | u32 result; | |
3402 | ||
3403 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, | |
3404 | outbuf, sizeof(outbuf), &outlen); | |
3405 | if (rc != 0) | |
3406 | return rc; | |
3407 | ||
3408 | if (outlen < MC_CMD_POLL_BIST_OUT_LEN) | |
3409 | return -EIO; | |
3410 | ||
3411 | result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); | |
3412 | switch (result) { | |
3413 | case MC_CMD_POLL_BIST_PASSED: | |
3414 | netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); | |
3415 | return 0; | |
3416 | case MC_CMD_POLL_BIST_TIMEOUT: | |
3417 | netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); | |
3418 | return -EIO; | |
3419 | case MC_CMD_POLL_BIST_FAILED: | |
3420 | netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); | |
3421 | return -EIO; | |
3422 | default: | |
3423 | netif_err(efx, hw, efx->net_dev, | |
3424 | "BIST returned unknown result %u", result); | |
3425 | return -EIO; | |
3426 | } | |
3427 | } | |
3428 | ||
3429 | static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) | |
3430 | { | |
3431 | int rc; | |
3432 | ||
3433 | netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); | |
3434 | ||
3435 | rc = efx_ef10_start_bist(efx, bist_type); | |
3436 | if (rc != 0) | |
3437 | return rc; | |
3438 | ||
3439 | return efx_ef10_poll_bist(efx); | |
3440 | } | |
3441 | ||
3442 | static int | |
3443 | efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | |
3444 | { | |
3445 | int rc, rc2; | |
3446 | ||
3447 | efx_reset_down(efx, RESET_TYPE_WORLD); | |
3448 | ||
3449 | rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, | |
3450 | NULL, 0, NULL, 0, NULL); | |
3451 | if (rc != 0) | |
3452 | goto out; | |
3453 | ||
3454 | tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; | |
3455 | tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; | |
3456 | ||
3457 | rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); | |
3458 | ||
3459 | out: | |
27324820 DP |
3460 | if (rc == -EPERM) |
3461 | rc = 0; | |
74cd60a4 JC |
3462 | rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); |
3463 | return rc ? rc : rc2; | |
3464 | } | |
3465 | ||
8127d661 BH |
3466 | #ifdef CONFIG_SFC_MTD |
3467 | ||
3468 | struct efx_ef10_nvram_type_info { | |
3469 | u16 type, type_mask; | |
3470 | u8 port; | |
3471 | const char *name; | |
3472 | }; | |
3473 | ||
3474 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { | |
3475 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, | |
3476 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, | |
3477 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, | |
3478 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, | |
3479 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, | |
3480 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, | |
3481 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, | |
3482 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, | |
3483 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, | |
a84f3bf9 | 3484 | { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, |
8127d661 | 3485 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, |
cea0604d EC |
3486 | { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, |
3487 | { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, | |
5fb1beec BK |
3488 | { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, |
3489 | { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, | |
3490 | { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, | |
b9ad6de5 PF |
3491 | { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, |
3492 | { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, | |
8127d661 | 3493 | }; |
33664635 | 3494 | #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) |
8127d661 BH |
3495 | |
3496 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |
3497 | struct efx_mcdi_mtd_partition *part, | |
33664635 EC |
3498 | unsigned int type, |
3499 | unsigned long *found) | |
8127d661 BH |
3500 | { |
3501 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); | |
3502 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); | |
3503 | const struct efx_ef10_nvram_type_info *info; | |
d41987e9 | 3504 | size_t size, erase_size, write_size, outlen; |
33664635 | 3505 | int type_idx = 0; |
8127d661 BH |
3506 | bool protected; |
3507 | int rc; | |
3508 | ||
33664635 EC |
3509 | for (type_idx = 0; ; type_idx++) { |
3510 | if (type_idx == EF10_NVRAM_PARTITION_COUNT) | |
8127d661 | 3511 | return -ENODEV; |
33664635 | 3512 | info = efx_ef10_nvram_types + type_idx; |
8127d661 BH |
3513 | if ((type & ~info->type_mask) == info->type) |
3514 | break; | |
3515 | } | |
3516 | if (info->port != efx_port_num(efx)) | |
3517 | return -ENODEV; | |
3518 | ||
d41987e9 EC |
3519 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size, |
3520 | &protected); | |
8127d661 BH |
3521 | if (rc) |
3522 | return rc; | |
5fb1beec BK |
3523 | if (protected && |
3524 | (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && | |
3525 | type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) | |
3526 | /* Hide protected partitions that don't provide defaults. */ | |
3527 | return -ENODEV; | |
3528 | ||
8127d661 | 3529 | if (protected) |
5fb1beec BK |
3530 | /* Protected partitions are read only. */ |
3531 | erase_size = 0; | |
8127d661 | 3532 | |
33664635 EC |
3533 | /* If we've already exposed a partition of this type, hide this |
3534 | * duplicate. All operations on MTDs are keyed by the type anyway, | |
3535 | * so we can't act on the duplicate. | |
3536 | */ | |
3537 | if (__test_and_set_bit(type_idx, found)) | |
3538 | return -EEXIST; | |
3539 | ||
8127d661 BH |
3540 | part->nvram_type = type; |
3541 | ||
3542 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); | |
3543 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), | |
3544 | outbuf, sizeof(outbuf), &outlen); | |
3545 | if (rc) | |
3546 | return rc; | |
3547 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) | |
3548 | return -EIO; | |
3549 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & | |
3550 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) | |
3551 | part->fw_subtype = MCDI_DWORD(outbuf, | |
3552 | NVRAM_METADATA_OUT_SUBTYPE); | |
3553 | ||
3554 | part->common.dev_type_name = "EF10 NVRAM manager"; | |
3555 | part->common.type_name = info->name; | |
3556 | ||
3557 | part->common.mtd.type = MTD_NORFLASH; | |
3558 | part->common.mtd.flags = MTD_CAP_NORFLASH; | |
3559 | part->common.mtd.size = size; | |
3560 | part->common.mtd.erasesize = erase_size; | |
cea0604d EC |
3561 | /* Read-only partitions (e.g. sfc_status) cannot be erased */ | |
3562 | if (!erase_size) | |
3563 | part->common.mtd.flags |= MTD_NO_ERASE; | |
8127d661 | 3564 | |
d41987e9 EC |
3565 | part->common.mtd.writesize = write_size; |
3566 | ||
8127d661 BH |
3567 | return 0; |
3568 | } | |
3569 | ||
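The __test_and_set_bit() above gives first-wins semantics per partition type: the first partition of each type is exposed and later duplicates get -EEXIST. A portable (non-atomic) sketch of the idiom:

#include <stdbool.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Returns the previous value of the bit, setting it as a side effect:
 * the first caller for a given index sees 'false', later ones 'true'.
 */
static bool test_and_set(unsigned long *bitmap, unsigned int bit)
{
	unsigned long mask = 1UL << (bit % BITS_PER_LONG);
	unsigned long *word = &bitmap[bit / BITS_PER_LONG];
	bool was_set = (*word & mask) != 0;

	*word |= mask;
	return was_set;
}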
3570 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | |
3571 | { | |
3572 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | |
c6528542 | 3573 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; |
8127d661 BH |
3574 | struct efx_mcdi_mtd_partition *parts; |
3575 | size_t outlen, n_parts_total, i, n_parts; | |
3576 | unsigned int type; | |
3577 | int rc; | |
3578 | ||
3579 | ASSERT_RTNL(); | |
3580 | ||
3581 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); | |
3582 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, | |
3583 | outbuf, sizeof(outbuf), &outlen); | |
3584 | if (rc) | |
3585 | return rc; | |
3586 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) | |
3587 | return -EIO; | |
3588 | ||
3589 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); | |
3590 | if (n_parts_total > | |
3591 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) | |
3592 | return -EIO; | |
3593 | ||
3594 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); | |
3595 | if (!parts) | |
3596 | return -ENOMEM; | |
3597 | ||
3598 | n_parts = 0; | |
3599 | for (i = 0; i < n_parts_total; i++) { | |
3600 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, | |
3601 | i); | |
33664635 EC |
3602 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, |
3603 | found); | |
3604 | if (rc == -EEXIST || rc == -ENODEV) | |
3605 | continue; | |
3606 | if (rc) | |
8127d661 | 3607 | goto fail; |
33664635 | 3608 | n_parts++; |
8127d661 BH |
3609 | } |
3610 | ||
1fa89ffb TY |
3611 | if (!n_parts) { |
3612 | kfree(parts); | |
3613 | return 0; | |
3614 | } | |
3615 | ||
8127d661 BH |
3616 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); |
3617 | fail: | |
3618 | if (rc) | |
3619 | kfree(parts); | |
3620 | return rc; | |
3621 | } | |
3622 | ||
3623 | #endif /* CONFIG_SFC_MTD */ | |
3624 | ||
3625 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) | |
3626 | { | |
3627 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); | |
3628 | } | |
3629 | ||
02246a7f SS |
3630 | static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, |
3631 | u32 host_time) {} | |
3632 | ||
bd9a265d JC |
3633 | static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, |
3634 | bool temp) | |
3635 | { | |
3636 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); | |
3637 | int rc; | |
3638 | ||
3639 | if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || | |
3640 | channel->sync_events_state == SYNC_EVENTS_VALID || | |
3641 | (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) | |
3642 | return 0; | |
3643 | channel->sync_events_state = SYNC_EVENTS_REQUESTED; | |
3644 | ||
3645 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); | |
3646 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); | |
3647 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, | |
3648 | channel->channel); | |
3649 | ||
3650 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, | |
3651 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
3652 | ||
3653 | if (rc != 0) | |
3654 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : | |
3655 | SYNC_EVENTS_DISABLED; | |
3656 | ||
3657 | return rc; | |
3658 | } | |
3659 | ||
3660 | static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, | |
3661 | bool temp) | |
3662 | { | |
3663 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); | |
3664 | int rc; | |
3665 | ||
3666 | if (channel->sync_events_state == SYNC_EVENTS_DISABLED || | |
3667 | (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) | |
3668 | return 0; | |
3669 | if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { | |
3670 | channel->sync_events_state = SYNC_EVENTS_DISABLED; | |
3671 | return 0; | |
3672 | } | |
3673 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : | |
3674 | SYNC_EVENTS_DISABLED; | |
3675 | ||
3676 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); | |
3677 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); | |
3678 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, | |
3679 | MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); | |
3680 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, | |
3681 | channel->channel); | |
3682 | ||
3683 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, | |
3684 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
3685 | ||
3686 | return rc; | |
3687 | } | |
3688 | ||
3689 | static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, | |
3690 | bool temp) | |
3691 | { | |
3692 | int (*set)(struct efx_channel *channel, bool temp); | |
3693 | struct efx_channel *channel; | |
3694 | ||
3695 | set = en ? | |
3696 | efx_ef10_rx_enable_timestamping : | |
3697 | efx_ef10_rx_disable_timestamping; | |
3698 | ||
2935e3c3 EC |
3699 | channel = efx_ptp_channel(efx); |
3700 | if (channel) { | |
bd9a265d JC |
3701 | int rc = set(channel, temp); |
3702 | if (en && rc != 0) { | |
3703 | efx_ef10_ptp_set_ts_sync_events(efx, false, temp); | |
3704 | return rc; | |
3705 | } | |
3706 | } | |
3707 | ||
3708 | return 0; | |
3709 | } | |
3710 | ||
02246a7f | 3711 | static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, |
1ac23674 | 3712 | struct kernel_hwtstamp_config *init) |
02246a7f SS |
3713 | { |
3714 | return -EOPNOTSUPP; | |
3715 | } | |
3716 | ||
bd9a265d | 3717 | static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, |
1ac23674 | 3718 | struct kernel_hwtstamp_config *init) |
bd9a265d JC |
3719 | { |
3720 | int rc; | |
3721 | ||
3722 | switch (init->rx_filter) { | |
3723 | case HWTSTAMP_FILTER_NONE: | |
3724 | efx_ef10_ptp_set_ts_sync_events(efx, false, false); | |
3725 | /* if TX timestamping is still requested then leave PTP on */ | |
3726 | return efx_ptp_change_mode(efx, | |
3727 | init->tx_type != HWTSTAMP_TX_OFF, 0); | |
3728 | case HWTSTAMP_FILTER_ALL: | |
3729 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
3730 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
3731 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
3732 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
3733 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
3734 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
3735 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
3736 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
3737 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
3738 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
3739 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
3740 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
e3412575 | 3741 | case HWTSTAMP_FILTER_NTP_ALL: |
bd9a265d JC |
3742 | init->rx_filter = HWTSTAMP_FILTER_ALL; |
3743 | rc = efx_ptp_change_mode(efx, true, 0); | |
3744 | if (!rc) | |
3745 | rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); | |
3746 | if (rc) | |
3747 | efx_ptp_change_mode(efx, false, 0); | |
3748 | return rc; | |
3749 | default: | |
3750 | return -ERANGE; | |
3751 | } | |
3752 | } | |
3753 | ||
08a7b29b BK |
3754 | static int efx_ef10_get_phys_port_id(struct efx_nic *efx, |
3755 | struct netdev_phys_item_id *ppid) | |
3756 | { | |
3757 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
3758 | ||
3759 | if (!is_valid_ether_addr(nic_data->port_id)) | |
3760 | return -EOPNOTSUPP; | |
3761 | ||
3762 | ppid->id_len = ETH_ALEN; | |
3763 | memcpy(ppid->id, nic_data->port_id, ppid->id_len); | |
3764 | ||
3765 | return 0; | |
3766 | } | |
3767 | ||
4a53ea8a AR |
3768 | static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) |
3769 | { | |
3770 | if (proto != htons(ETH_P_8021Q)) | |
3771 | return -EINVAL; | |
3772 | ||
3773 | return efx_ef10_add_vlan(efx, vid); | |
3774 | } | |
3775 | ||
3776 | static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) | |
3777 | { | |
3778 | if (proto != htons(ETH_P_8021Q)) | |
3779 | return -EINVAL; | |
3780 | ||
3781 | return efx_ef10_del_vlan(efx, vid); | |
3782 | } | |
3783 | ||
e5fbd977 JC |
3784 | /* We rely on the MCDI wiping out our TX rings if it made any changes to the |
3785 | * ports table, ensuring that any TSO descriptors that were made on a now- | |
3786 | * removed tunnel port will be blown away and won't break things when we try | |
3787 | * to transmit them using the new ports table. | |
3788 | */ | |
3789 | static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) | |
3790 | { | |
3791 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
3792 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); | |
3793 | MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); | |
3794 | bool will_reset = false; | |
3795 | size_t num_entries = 0; | |
3796 | size_t inlen, outlen; | |
3797 | size_t i; | |
3798 | int rc; | |
3799 | efx_dword_t flags_and_num_entries; | |
3800 | ||
3801 | WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); | |
3802 | ||
3803 | nic_data->udp_tunnels_dirty = false; | |
3804 | ||
3805 | if (!(nic_data->datapath_caps & | |
3806 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { | |
9c568fd8 | 3807 | efx_device_attach_if_not_resetting(efx); |
e5fbd977 JC |
3808 | return 0; |
3809 | } | |
3810 | ||
3811 | BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > | |
3812 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); | |
3813 | ||
3814 | for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { | |
205a55f4 JK |
3815 | if (nic_data->udp_tunnels[i].type != |
3816 | TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) { | |
e5fbd977 JC |
3817 | efx_dword_t entry; |
3818 | ||
3819 | EFX_POPULATE_DWORD_2(entry, | |
3820 | TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, | |
3821 | ntohs(nic_data->udp_tunnels[i].port), | |
3822 | TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, | |
3823 | nic_data->udp_tunnels[i].type); | |
3824 | *_MCDI_ARRAY_DWORD(inbuf, | |
3825 | SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, | |
3826 | num_entries++) = entry; | |
3827 | } | |
3828 | } | |
3829 | ||
3830 | BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - | |
3831 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != | |
3832 | EFX_WORD_1_LBN); | |
3833 | BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != | |
3834 | EFX_WORD_1_WIDTH); | |
3835 | EFX_POPULATE_DWORD_2(flags_and_num_entries, | |
3836 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, | |
3837 | !!unloading, | |
3838 | EFX_WORD_1, num_entries); | |
3839 | *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = | |
3840 | flags_and_num_entries; | |
3841 | ||
3842 | inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); | |
3843 | ||
3844 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, | |
3845 | inbuf, inlen, outbuf, sizeof(outbuf), &outlen); | |
3846 | if (rc == -EIO) { | |
3847 | /* Most likely the MC rebooted due to another function also | |
3848 | * setting its tunnel port list. Mark the tunnel port list as | |
3849 | * dirty, so it will be pushed upon coming up from the reboot. | |
3850 | */ | |
3851 | nic_data->udp_tunnels_dirty = true; | |
3852 | return 0; | |
3853 | } | |
3854 | ||
3855 | if (rc) { | |
3856 | /* -EPERM is expected: the command is unavailable to unprivileged functions */ | |
3857 | if (rc != -EPERM) | |
3858 | netif_warn(efx, drv, efx->net_dev, | |
3859 | "Unable to set UDP tunnel ports; rc=%d.\n", rc); | |
3860 | } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & | |
3861 | (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { | |
3862 | netif_info(efx, drv, efx->net_dev, | |
3863 | "Rebooting MC due to UDP tunnel port list change\n"); | |
3864 | will_reset = true; | |
3865 | if (unloading) | |
3866 | /* Delay for the MC reset to complete. This will make | |
3867 | * unloading other functions a bit smoother. This is a | |
3868 | * race, but the other unload will work whichever way | |
3869 | * it goes, this just avoids an unnecessary error | |
3870 | * message. | |
3871 | */ | |
3872 | msleep(100); | |
3873 | } | |
3874 | if (!will_reset && !unloading) { | |
3875 | /* The caller will have detached, relying on the MC reset to | |
3876 | * trigger a re-attach. Since there won't be an MC reset, we | |
3877 | * have to do the attach ourselves. | |
3878 | */ | |
9c568fd8 | 3879 | efx_device_attach_if_not_resetting(efx); |
e5fbd977 JC |
3880 | } |
3881 | ||
3882 | return rc; | |
3883 | } | |
3884 | ||
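Each table entry built above packs a UDP port and a protocol tag into one dword via EFX_POPULATE_DWORD_2. The field positions below are an assumption for illustration; the driver takes them from the TUNNEL_ENCAP_UDP_PORT_ENTRY_* definitions:

#include <stdint.h>

#define PROTO_VXLAN  0	/* placeholder protocol codes */
#define PROTO_GENEVE 1

static uint32_t tnl_entry(uint16_t host_order_port, uint16_t proto)
{
	/* Assumed layout: port in bits 0..15, protocol in bits 16..31. */
	return (uint32_t)proto << 16 | host_order_port;
}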
3885 | static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) | |
3886 | { | |
3887 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
3888 | int rc = 0; | |
3889 | ||
3890 | mutex_lock(&nic_data->udp_tunnels_lock); | |
3891 | if (nic_data->udp_tunnels_dirty) { | |
3892 | /* Make sure all TX are stopped while we modify the table, else | |
3893 | * we might race against an efx_features_check(). | |
3894 | */ | |
3895 | efx_device_detach_sync(efx); | |
3896 | rc = efx_ef10_set_udp_tnl_ports(efx, false); | |
3897 | } | |
3898 | mutex_unlock(&nic_data->udp_tunnels_lock); | |
3899 | return rc; | |
3900 | } | |
3901 | ||
205a55f4 JK |
3902 | static int efx_ef10_udp_tnl_set_port(struct net_device *dev, |
3903 | unsigned int table, unsigned int entry, | |
3904 | struct udp_tunnel_info *ti) | |
e5fbd977 | 3905 | { |
8cb03f4e | 3906 | struct efx_nic *efx = efx_netdev_priv(dev); |
205a55f4 JK |
3907 | struct efx_ef10_nic_data *nic_data; |
3908 | int efx_tunnel_type, rc; | |
e5fbd977 | 3909 | |
205a55f4 JK |
3910 | if (ti->type == UDP_TUNNEL_TYPE_VXLAN) |
3911 | efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; | |
3912 | else | |
3913 | efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; | |
e5fbd977 | 3914 | |
205a55f4 | 3915 | nic_data = efx->nic_data; |
e5fbd977 JC |
3916 | if (!(nic_data->datapath_caps & |
3917 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) | |
205a55f4 | 3918 | return -EOPNOTSUPP; |
e5fbd977 JC |
3919 | |
3920 | mutex_lock(&nic_data->udp_tunnels_lock); | |
3921 | /* Make sure all TX are stopped while we add to the table, else we | |
3922 | * might race against an efx_features_check(). | |
3923 | */ | |
3924 | efx_device_detach_sync(efx); | |
205a55f4 JK |
3925 | nic_data->udp_tunnels[entry].type = efx_tunnel_type; |
3926 | nic_data->udp_tunnels[entry].port = ti->port; | |
3927 | rc = efx_ef10_set_udp_tnl_ports(efx, false); | |
e5fbd977 | 3928 | mutex_unlock(&nic_data->udp_tunnels_lock); |
205a55f4 | 3929 | |
e5fbd977 JC |
3930 | return rc; |
3931 | } | |
3932 | ||
3933 | /* Called under the TX lock with the TX queue running, hence no-one can be | |
3934 | * in the middle of updating the UDP tunnels table. However, they could | |
3935 | * have tried and failed the MCDI, in which case they'll have set the dirty | |
3936 | * flag before dropping their locks. | |
3937 | */ | |
3938 | static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) | |
3939 | { | |
3940 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
205a55f4 | 3941 | size_t i; |
e5fbd977 JC |
3942 | |
3943 | if (!(nic_data->datapath_caps & | |
3944 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) | |
3945 | return false; | |
3946 | ||
3947 | if (nic_data->udp_tunnels_dirty) | |
3948 | /* SW table may not match HW state, so just assume we can't | |
3949 | * use any UDP tunnel offloads. | |
3950 | */ | |
3951 | return false; | |
3952 | ||
205a55f4 JK |
3953 | for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) |
3954 | if (nic_data->udp_tunnels[i].type != | |
3955 | TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID && | |
3956 | nic_data->udp_tunnels[i].port == port) | |
3957 | return true; | |
3958 | ||
3959 | return false; | |
e5fbd977 JC |
3960 | } |
3961 | ||
205a55f4 JK |
3962 | static int efx_ef10_udp_tnl_unset_port(struct net_device *dev, |
3963 | unsigned int table, unsigned int entry, | |
3964 | struct udp_tunnel_info *ti) | |
e5fbd977 | 3965 | { |
8cb03f4e | 3966 | struct efx_nic *efx = efx_netdev_priv(dev); |
205a55f4 | 3967 | struct efx_ef10_nic_data *nic_data; |
e5fbd977 JC |
3968 | int rc; |
3969 | ||
205a55f4 | 3970 | nic_data = efx->nic_data; |
e5fbd977 JC |
3971 | |
3972 | mutex_lock(&nic_data->udp_tunnels_lock); | |
3973 | /* Make sure all TX are stopped while we remove from the table, else we | |
3974 | * might race against an efx_features_check(). | |
3975 | */ | |
3976 | efx_device_detach_sync(efx); | |
205a55f4 JK |
3977 | nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; |
3978 | nic_data->udp_tunnels[entry].port = 0; | |
3979 | rc = efx_ef10_set_udp_tnl_ports(efx, false); | |
e5fbd977 | 3980 | mutex_unlock(&nic_data->udp_tunnels_lock); |
205a55f4 | 3981 | |
e5fbd977 JC |
3982 | return rc; |
3983 | } | |
3984 | ||
205a55f4 JK |
3985 | static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = { |
3986 | .set_port = efx_ef10_udp_tnl_set_port, | |
3987 | .unset_port = efx_ef10_udp_tnl_unset_port, | |
3988 | .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, | |
3989 | .tables = { | |
3990 | { | |
3991 | .n_entries = 16, | |
3992 | .tunnel_types = UDP_TUNNEL_TYPE_VXLAN | | |
3993 | UDP_TUNNEL_TYPE_GENEVE, | |
3994 | }, | |
3995 | }, | |
3996 | }; | |
3997 | ||
9b46132c EC |
3998 | /* EF10 may have multiple datapath firmware variants within a |
3999 | * single version. Report which variants are running. | |
4000 | */ | |
4001 | static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf, | |
4002 | size_t len) | |
4003 | { | |
4004 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
4005 | ||
4006 | return scnprintf(buf, len, " rx%x tx%x", | |
4007 | nic_data->rx_dpcpu_fw_id, | |
4008 | nic_data->tx_dpcpu_fw_id); | |
4009 | } | |
4010 | ||
be904b85 TZ |
4011 | static unsigned int ef10_check_caps(const struct efx_nic *efx, |
4012 | u8 flag, | |
4013 | u32 offset) | |
4014 | { | |
4015 | const struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
4016 | ||
4017 | switch (offset) { | |
4018 | case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST): | |
4019 | return nic_data->datapath_caps & BIT_ULL(flag); | |
4020 | case(MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST): | |
4021 | return nic_data->datapath_caps2 & BIT_ULL(flag); | |
4022 | default: | |
4023 | return 0; | |
4024 | } | |
4025 | } | |
4026 | ||
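Callers reach this through the efx_has_cap() style of lookup seen earlier (e.g. SET_MAC_ENHANCED): the flag's bit number is tested against whichever capability word the MCDI offset selects. A self-contained sketch with illustrative offset values:

#include <stdint.h>

#define FLAGS1_OFST 0	/* stand-ins for the MCDI *_FLAGS[12]_OFST values */
#define FLAGS2_OFST 4

static uint32_t check_caps(uint32_t caps1, uint32_t caps2,
			   uint8_t flag, uint32_t offset)
{
	switch (offset) {
	case FLAGS1_OFST:
		return caps1 & (1U << flag);
	case FLAGS2_OFST:
		return caps2 & (1U << flag);
	default:
		return 0;	/* unknown word: treat capability as absent */
	}
}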
000fe940 MH |
4027 | static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx) |
4028 | { | |
4029 | unsigned int ret = EFX_RECYCLE_RING_SIZE_10G; | |
4030 | ||
4031 | /* There is no difference between PFs and VFs. The size is based on | |
4032 | * the maximum link speed of a given NIC. | |
4033 | */ | |
4034 | switch (efx->pci_dev->device & 0xfff) { | |
4035 | case 0x0903: /* Farmingdale can do up to 10G */ | |
4036 | break; | |
4037 | case 0x0923: /* Greenport can do up to 40G */ | |
4038 | case 0x0a03: /* Medford can do up to 40G */ | |
4039 | ret *= 4; | |
4040 | break; | |
4041 | default: /* Medford2 can do up to 100G */ | |
4042 | ret *= 10; | |
4043 | } | |
4044 | ||
4045 | if (IS_ENABLED(CONFIG_PPC64)) | |
4046 | ret *= 4; | |
4047 | ||
4048 | return ret; | |
4049 | } | |
4050 | ||
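Worked through with an assumed base of 256 entries for EFX_RECYCLE_RING_SIZE_10G (the real constant lives elsewhere in the driver), the scaling above gives:

/* device id        link   multiplier   entries   on PPC64 (x4)
 * 0x0903            10G        x1         256         1024
 * 0x0923, 0x0a03    40G        x4        1024         4096
 * other (Medford2) 100G       x10        2560        10240
 */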
100a9db5 AR |
4051 | #define EF10_OFFLOAD_FEATURES \ |
4052 | (NETIF_F_IP_CSUM | \ | |
4a53ea8a | 4053 | NETIF_F_HW_VLAN_CTAG_FILTER | \ |
100a9db5 AR |
4054 | NETIF_F_IPV6_CSUM | \ |
4055 | NETIF_F_RXHASH | \ | |
ca4a80e4 ÍH |
4056 | NETIF_F_NTUPLE | \ |
4057 | NETIF_F_SG | \ | |
4058 | NETIF_F_RXCSUM | \ | |
4059 | NETIF_F_RXALL) | |
100a9db5 | 4060 | |
02246a7f | 4061 | const struct efx_nic_type efx_hunt_a0_vf_nic_type = { |
6f7f8aa6 | 4062 | .is_vf = true, |
03714bbb | 4063 | .mem_bar = efx_ef10_vf_mem_bar, |
02246a7f SS |
4064 | .mem_map_size = efx_ef10_mem_map_size, |
4065 | .probe = efx_ef10_probe_vf, | |
4066 | .remove = efx_ef10_remove, | |
4067 | .dimension_resources = efx_ef10_dimension_resources, | |
4068 | .init = efx_ef10_init_nic, | |
d3142c19 | 4069 | .fini = efx_ef10_fini_nic, |
087e9025 | 4070 | .map_reset_reason = efx_ef10_map_reset_reason, |
02246a7f SS |
4071 | .map_reset_flags = efx_ef10_map_reset_flags, |
4072 | .reset = efx_ef10_reset, | |
4073 | .probe_port = efx_mcdi_port_probe, | |
4074 | .remove_port = efx_mcdi_port_remove, | |
d700fe01 | 4075 | .fini_dmaq = efx_fini_dmaq, |
02246a7f SS |
4076 | .prepare_flr = efx_ef10_prepare_flr, |
4077 | .finish_flr = efx_port_dummy_op_void, | |
4078 | .describe_stats = efx_ef10_describe_stats, | |
d7788196 | 4079 | .update_stats = efx_ef10_update_stats_vf, |
623b9988 | 4080 | .update_stats_atomic = efx_ef10_update_stats_atomic_vf, |
02246a7f SS |
4081 | .start_stats = efx_port_dummy_op_void, |
4082 | .pull_stats = efx_port_dummy_op_void, | |
4083 | .stop_stats = efx_port_dummy_op_void, | |
02246a7f | 4084 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
af3c38d3 | 4085 | .reconfigure_mac = efx_ef10_mac_reconfigure, |
02246a7f SS |
4086 | .check_mac_fault = efx_mcdi_mac_check_fault, |
4087 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
4088 | .get_wol = efx_ef10_get_wol_vf, | |
4089 | .set_wol = efx_ef10_set_wol_vf, | |
4090 | .resume_wol = efx_port_dummy_op_void, | |
4091 | .mcdi_request = efx_ef10_mcdi_request, | |
4092 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
4093 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
4094 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
c577e59e | 4095 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
02246a7f SS |
4096 | .irq_enable_master = efx_port_dummy_op_void, |
4097 | .irq_test_generate = efx_ef10_irq_test_generate, | |
4098 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
4099 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
4100 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | |
4101 | .tx_probe = efx_ef10_tx_probe, | |
4102 | .tx_init = efx_ef10_tx_init, | |
8ee4c907 | 4103 | .tx_remove = efx_mcdi_tx_remove, |
02246a7f | 4104 | .tx_write = efx_ef10_tx_write, |
e9117e50 | 4105 | .tx_limit_len = efx_ef10_tx_limit_len, |
51b35a45 | 4106 | .tx_enqueue = __efx_enqueue_skb, |
90c914d2 AM |
4107 | .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config, |
4108 | .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, | |
8da92642 AM |
4109 | .rx_probe = efx_mcdi_rx_probe, |
4110 | .rx_init = efx_mcdi_rx_init, | |
4111 | .rx_remove = efx_mcdi_rx_remove, | |
02246a7f SS |
4112 | .rx_write = efx_ef10_rx_write, |
4113 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
51b35a45 | 4114 | .rx_packet = __efx_rx_packet, |
4438b587 | 4115 | .ev_probe = efx_mcdi_ev_probe, |
02246a7f | 4116 | .ev_init = efx_ef10_ev_init, |
4438b587 AM |
4117 | .ev_fini = efx_mcdi_ev_fini, |
4118 | .ev_remove = efx_mcdi_ev_remove, | |
02246a7f SS |
4119 | .ev_process = efx_ef10_ev_process, |
4120 | .ev_read_ack = efx_ef10_ev_read_ack, | |
4121 | .ev_test_generate = efx_ef10_ev_test_generate, | |
fd14e5fd | 4122 | .filter_table_probe = efx_ef10_filter_table_probe, |
90c914d2 | 4123 | .filter_table_restore = efx_mcdi_filter_table_restore, |
77eb4074 | 4124 | .filter_table_remove = efx_ef10_filter_table_remove, |
90c914d2 AM |
4125 | .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, |
4126 | .filter_insert = efx_mcdi_filter_insert, | |
4127 | .filter_remove_safe = efx_mcdi_filter_remove_safe, | |
4128 | .filter_get_safe = efx_mcdi_filter_get_safe, | |
4129 | .filter_clear_rx = efx_mcdi_filter_clear_rx, | |
4130 | .filter_count_rx_used = efx_mcdi_filter_count_rx_used, | |
4131 | .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, | |
4132 | .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, | |
02246a7f | 4133 | #ifdef CONFIG_RFS_ACCEL |
90c914d2 | 4134 | .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, |
02246a7f SS |
4135 | #endif |
4136 | #ifdef CONFIG_SFC_MTD | |
4137 | .mtd_probe = efx_port_dummy_op_int, | |
4138 | #endif | |
4139 | .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, | |
4140 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, | |
4a53ea8a AR |
4141 | .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, |
4142 | .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, | |
02246a7f | 4143 | #ifdef CONFIG_SFC_SRIOV |
7b8c7b54 SS |
4144 | .vswitching_probe = efx_ef10_vswitching_probe_vf, |
4145 | .vswitching_restore = efx_ef10_vswitching_restore_vf, | |
4146 | .vswitching_remove = efx_ef10_vswitching_remove_vf, | |
02246a7f | 4147 | #endif |
0d5e0fbb | 4148 | .get_mac_address = efx_ef10_get_mac_address_vf, |
910c8789 | 4149 | .set_mac_address = efx_ef10_set_mac_address, |
0d5e0fbb | 4150 | |
08a7b29b | 4151 | .get_phys_port_id = efx_ef10_get_phys_port_id, |
02246a7f SS |
4152 | .revision = EFX_REV_HUNT_A0, |
4153 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
4154 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
4155 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
4156 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, | |
4157 | .can_rx_scatter = true, | |
4158 | .always_rx_scatter = true, | |
6f9f6ec2 | 4159 | .min_interrupt_mode = EFX_INT_MODE_MSIX, |
02246a7f | 4160 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
100a9db5 | 4161 | .offload_features = EF10_OFFLOAD_FEATURES, |
02246a7f | 4162 | .mcdi_max_ver = 2, |
90c914d2 | 4163 | .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, |
02246a7f SS |
4164 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | |
4165 | 1 << HWTSTAMP_FILTER_ALL, |
f74d1995 | 4166 | .rx_hash_key_size = 40, |
be904b85 | 4167 | .check_caps = ef10_check_caps, |
9b46132c | 4168 | .print_additional_fwver = efx_ef10_print_additional_fwver, |
51b35a45 | 4169 | .sensor_event = efx_mcdi_sensor_event, |
000fe940 | 4170 | .rx_recycle_ring_size = efx_ef10_recycle_ring_size, |
02246a7f SS |
4171 | }; |
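
The table just closed is one of three efx_nic_type instances in this file; the generic sfc core never calls EF10 code directly, but dispatches every hardware operation through whichever of these tables is installed as efx->type. A minimal sketch of that dispatch pattern, assuming the reconfigure_mac signature declared in net_driver.h (the wrapper name and the NULL check are illustrative, not the core's actual helper):

/* Illustrative only: how the sfc core invokes an op from the table
 * above. Optional ops (e.g. those provided only under config options
 * such as CONFIG_SFC_SRIOV) may be NULL, so a caller checks before
 * dispatching. Assumes net_driver.h is in scope.
 */
static int example_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
{
	if (!efx->type->reconfigure_mac)
		return -EOPNOTSUPP;
	return efx->type->reconfigure_mac(efx, mtu_only);
}
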
4172 | ||
8127d661 | 4173 | const struct efx_nic_type efx_hunt_a0_nic_type = { |
6f7f8aa6 | 4174 | .is_vf = false, |
03714bbb | 4175 | .mem_bar = efx_ef10_pf_mem_bar, |
8127d661 | 4176 | .mem_map_size = efx_ef10_mem_map_size, |
02246a7f | 4177 | .probe = efx_ef10_probe_pf, |
8127d661 BH |
4178 | .remove = efx_ef10_remove, |
4179 | .dimension_resources = efx_ef10_dimension_resources, | |
4180 | .init = efx_ef10_init_nic, | |
d3142c19 | 4181 | .fini = efx_ef10_fini_nic, |
087e9025 | 4182 | .map_reset_reason = efx_ef10_map_reset_reason, |
8127d661 | 4183 | .map_reset_flags = efx_ef10_map_reset_flags, |
3e336261 | 4184 | .reset = efx_ef10_reset, |
8127d661 BH |
4185 | .probe_port = efx_mcdi_port_probe, |
4186 | .remove_port = efx_mcdi_port_remove, | |
d700fe01 | 4187 | .fini_dmaq = efx_fini_dmaq, |
e283546c EC |
4188 | .prepare_flr = efx_ef10_prepare_flr, |
4189 | .finish_flr = efx_port_dummy_op_void, | |
8127d661 | 4190 | .describe_stats = efx_ef10_describe_stats, |
d7788196 | 4191 | .update_stats = efx_ef10_update_stats_pf, |
8127d661 | 4192 | .start_stats = efx_mcdi_mac_start_stats, |
f8f3b5ae | 4193 | .pull_stats = efx_mcdi_mac_pull_stats, |
8127d661 | 4194 | .stop_stats = efx_mcdi_mac_stop_stats, |
8127d661 BH |
4195 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
4196 | .reconfigure_mac = efx_ef10_mac_reconfigure, | |
4197 | .check_mac_fault = efx_mcdi_mac_check_fault, | |
4198 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
4199 | .get_wol = efx_ef10_get_wol, | |
4200 | .set_wol = efx_ef10_set_wol, | |
4201 | .resume_wol = efx_port_dummy_op_void, | |
cab351be | 4202 | .get_fec_stats = efx_ef10_get_fec_stats, |
74cd60a4 | 4203 | .test_chip = efx_ef10_test_chip, |
8127d661 BH |
4204 | .test_nvram = efx_mcdi_nvram_test_all, |
4205 | .mcdi_request = efx_ef10_mcdi_request, | |
4206 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
4207 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
4208 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
c577e59e | 4209 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
8127d661 BH |
4210 | .irq_enable_master = efx_port_dummy_op_void, |
4211 | .irq_test_generate = efx_ef10_irq_test_generate, | |
4212 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
4213 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
4214 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | |
4215 | .tx_probe = efx_ef10_tx_probe, | |
4216 | .tx_init = efx_ef10_tx_init, | |
8ee4c907 | 4217 | .tx_remove = efx_mcdi_tx_remove, |
8127d661 | 4218 | .tx_write = efx_ef10_tx_write, |
e9117e50 | 4219 | .tx_limit_len = efx_ef10_tx_limit_len, |
51b35a45 | 4220 | .tx_enqueue = __efx_enqueue_skb, |
90c914d2 AM |
4221 | .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, |
4222 | .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, | |
4223 | .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, | |
4224 | .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, | |
4225 | .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, | |
8da92642 AM |
4226 | .rx_probe = efx_mcdi_rx_probe, |
4227 | .rx_init = efx_mcdi_rx_init, | |
4228 | .rx_remove = efx_mcdi_rx_remove, | |
8127d661 BH |
4229 | .rx_write = efx_ef10_rx_write, |
4230 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
51b35a45 | 4231 | .rx_packet = __efx_rx_packet, |
4438b587 | 4232 | .ev_probe = efx_mcdi_ev_probe, |
8127d661 | 4233 | .ev_init = efx_ef10_ev_init, |
4438b587 AM |
4234 | .ev_fini = efx_mcdi_ev_fini, |
4235 | .ev_remove = efx_mcdi_ev_remove, | |
8127d661 BH |
4236 | .ev_process = efx_ef10_ev_process, |
4237 | .ev_read_ack = efx_ef10_ev_read_ack, | |
4238 | .ev_test_generate = efx_ef10_ev_test_generate, | |
fd14e5fd | 4239 | .filter_table_probe = efx_ef10_filter_table_probe, |
90c914d2 | 4240 | .filter_table_restore = efx_mcdi_filter_table_restore, |
b7ca8d5f | 4241 | .filter_table_remove = efx_ef10_filter_table_remove, |
90c914d2 AM |
4242 | .filter_update_rx_scatter = efx_mcdi_update_rx_scatter, |
4243 | .filter_insert = efx_mcdi_filter_insert, | |
4244 | .filter_remove_safe = efx_mcdi_filter_remove_safe, | |
4245 | .filter_get_safe = efx_mcdi_filter_get_safe, | |
4246 | .filter_clear_rx = efx_mcdi_filter_clear_rx, | |
4247 | .filter_count_rx_used = efx_mcdi_filter_count_rx_used, | |
4248 | .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, | |
4249 | .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, | |
8127d661 | 4250 | #ifdef CONFIG_RFS_ACCEL |
90c914d2 | 4251 | .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, |
8127d661 BH |
4252 | #endif |
4253 | #ifdef CONFIG_SFC_MTD | |
4254 | .mtd_probe = efx_ef10_mtd_probe, | |
4255 | .mtd_rename = efx_mcdi_mtd_rename, | |
4256 | .mtd_read = efx_mcdi_mtd_read, | |
4257 | .mtd_erase = efx_mcdi_mtd_erase, | |
4258 | .mtd_write = efx_mcdi_mtd_write, | |
4259 | .mtd_sync = efx_mcdi_mtd_sync, | |
4260 | #endif | |
4261 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | |
bd9a265d JC |
4262 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, |
4263 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, | |
4a53ea8a AR |
4264 | .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, |
4265 | .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, | |
e5fbd977 | 4266 | .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, |
e5fbd977 | 4267 | .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, |
7fa8d547 | 4268 | #ifdef CONFIG_SFC_SRIOV |
834e23dd | 4269 | .sriov_configure = efx_ef10_sriov_configure, |
d98a4ffe SS |
4270 | .sriov_init = efx_ef10_sriov_init, |
4271 | .sriov_fini = efx_ef10_sriov_fini, | |
d98a4ffe | 4272 | .sriov_wanted = efx_ef10_sriov_wanted, |
7fa8d547 SS |
4273 | .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, |
4274 | .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, | |
4275 | .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, | |
4276 | .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, | |
4392dc69 | 4277 | .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, |
7b8c7b54 SS |
4278 | .vswitching_probe = efx_ef10_vswitching_probe_pf, |
4279 | .vswitching_restore = efx_ef10_vswitching_restore_pf, | |
4280 | .vswitching_remove = efx_ef10_vswitching_remove_pf, | |
7fa8d547 | 4281 | #endif |
0d5e0fbb | 4282 | .get_mac_address = efx_ef10_get_mac_address_pf, |
910c8789 | 4283 | .set_mac_address = efx_ef10_set_mac_address, |
46d1efd8 | 4284 | .tso_versions = efx_ef10_tso_versions, |
8127d661 | 4285 | |
08a7b29b | 4286 | .get_phys_port_id = efx_ef10_get_phys_port_id, |
8127d661 BH |
4287 | .revision = EFX_REV_HUNT_A0, |
4288 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
4289 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
4290 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
bd9a265d | 4291 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, |
8127d661 BH |
4292 | .can_rx_scatter = true, |
4293 | .always_rx_scatter = true, | |
de1deff9 | 4294 | .option_descriptors = true, |
6f9f6ec2 | 4295 | .min_interrupt_mode = EFX_INT_MODE_LEGACY, |
8127d661 | 4296 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
100a9db5 | 4297 | .offload_features = EF10_OFFLOAD_FEATURES, |
8127d661 | 4298 | .mcdi_max_ver = 2, |
90c914d2 | 4299 | .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, |
bd9a265d JC |
4300 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | |
4301 | 1 << HWTSTAMP_FILTER_ALL, |
f74d1995 | 4302 | .rx_hash_key_size = 40, |
be904b85 | 4303 | .check_caps = ef10_check_caps, |
9b46132c | 4304 | .print_additional_fwver = efx_ef10_print_additional_fwver, |
51b35a45 | 4305 | .sensor_event = efx_mcdi_sensor_event, |
000fe940 | 4306 | .rx_recycle_ring_size = efx_ef10_recycle_ring_size, |
8127d661 | 4307 | }; |
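
The PF table differs from the VF one above chiefly in the privileged operations it fills in (MTD, PTP sync events, SR-IOV administration, real WoL handlers), while both publish the same .hwtstamp_filters capability mask over enum hwtstamp_rx_filters: only HWTSTAMP_FILTER_NONE and HWTSTAMP_FILTER_ALL are accepted. A hypothetical validation helper (the function name is an assumption; the real check sits in the core's hardware-timestamping ioctl path) would test a requested RX filter mode like this:

/* Illustrative only: testing a requested RX timestamp filter against
 * the capability mask published in the efx_nic_type tables above.
 * Assumes net_driver.h (struct efx_nic) and linux/net_tstamp.h
 * (enum hwtstamp_rx_filters) are in scope.
 */
static bool example_hwtstamp_filter_supported(const struct efx_nic *efx,
					      enum hwtstamp_rx_filters rx_filter)
{
	return efx->type->hwtstamp_filters & (1 << rx_filter);
}
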
cf06766f JC |
4308 | |
4309 | const struct efx_nic_type efx_x4_nic_type = { | |
4310 | .is_vf = false, | |
4311 | .mem_bar = efx_ef10_pf_mem_bar, | |
4312 | .mem_map_size = efx_ef10_mem_map_size, | |
4313 | .probe = efx_ef10_probe_pf, | |
4314 | .remove = efx_ef10_remove, | |
4315 | .dimension_resources = efx_ef10_dimension_resources, | |
4316 | .init = efx_ef10_init_nic, | |
4317 | .fini = efx_ef10_fini_nic, | |
4318 | .map_reset_reason = efx_ef10_map_reset_reason, | |
4319 | .map_reset_flags = efx_ef10_map_reset_flags, | |
4320 | .reset = efx_ef10_reset, | |
4321 | .probe_port = efx_mcdi_port_probe, | |
4322 | .remove_port = efx_mcdi_port_remove, | |
4323 | .fini_dmaq = efx_fini_dmaq, | |
4324 | .prepare_flr = efx_ef10_prepare_flr, | |
4325 | .finish_flr = efx_port_dummy_op_void, | |
4326 | .describe_stats = efx_ef10_describe_stats, | |
4327 | .update_stats = efx_ef10_update_stats_pf, | |
4328 | .start_stats = efx_mcdi_mac_start_stats, | |
4329 | .pull_stats = efx_mcdi_mac_pull_stats, | |
4330 | .stop_stats = efx_mcdi_mac_stop_stats, | |
4331 | .push_irq_moderation = efx_ef10_push_irq_moderation, | |
4332 | .reconfigure_mac = efx_ef10_mac_reconfigure, | |
4333 | .check_mac_fault = efx_mcdi_mac_check_fault, | |
4334 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
4335 | .get_wol = efx_ef10_get_wol, | |
4336 | .set_wol = efx_ef10_set_wol, | |
4337 | .resume_wol = efx_port_dummy_op_void, | |
4338 | .get_fec_stats = efx_ef10_get_fec_stats, | |
4339 | .test_chip = efx_ef10_test_chip, | |
4340 | .test_nvram = efx_mcdi_nvram_test_all, | |
4341 | .mcdi_request = efx_ef10_mcdi_request, | |
4342 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
4343 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
4344 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
4345 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, | |
4346 | .irq_enable_master = efx_port_dummy_op_void, | |
4347 | .irq_test_generate = efx_ef10_irq_test_generate, | |
4348 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
4349 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
4350 | .tx_probe = efx_ef10_tx_probe, | |
4351 | .tx_init = efx_ef10_tx_init, | |
4352 | .tx_write = efx_ef10_tx_write, | |
4353 | .tx_limit_len = efx_ef10_tx_limit_len, | |
4354 | .tx_enqueue = __efx_enqueue_skb, | |
4355 | .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, | |
4356 | .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, | |
4357 | .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, | |
4358 | .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, | |
4359 | .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, | |
4360 | .rx_probe = efx_mcdi_rx_probe, | |
4361 | .rx_init = efx_mcdi_rx_init, | |
4362 | .rx_remove = efx_mcdi_rx_remove, | |
4363 | .rx_write = efx_ef10_rx_write, | |
4364 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
4365 | .rx_packet = __efx_rx_packet, | |
4366 | .ev_probe = efx_mcdi_ev_probe, | |
4367 | .ev_init = efx_ef10_ev_init, | |
4368 | .ev_fini = efx_mcdi_ev_fini, | |
4369 | .ev_remove = efx_mcdi_ev_remove, | |
4370 | .ev_process = efx_ef10_ev_process, | |
4371 | .ev_read_ack = efx_ef10_ev_read_ack, | |
4372 | .ev_test_generate = efx_ef10_ev_test_generate, | |
4373 | .filter_table_probe = efx_ef10_filter_table_probe, | |
4374 | .filter_table_restore = efx_mcdi_filter_table_restore, | |
4375 | .filter_table_remove = efx_ef10_filter_table_remove, | |
4376 | .filter_insert = efx_mcdi_filter_insert, | |
4377 | .filter_remove_safe = efx_mcdi_filter_remove_safe, | |
4378 | .filter_get_safe = efx_mcdi_filter_get_safe, | |
4379 | .filter_clear_rx = efx_mcdi_filter_clear_rx, | |
4380 | .filter_count_rx_used = efx_mcdi_filter_count_rx_used, | |
4381 | .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, | |
4382 | .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, | |
4383 | #ifdef CONFIG_RFS_ACCEL | |
4384 | .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, | |
4385 | #endif | |
4386 | #ifdef CONFIG_SFC_MTD | |
4387 | .mtd_probe = efx_ef10_mtd_probe, | |
4388 | .mtd_rename = efx_mcdi_mtd_rename, | |
4389 | .mtd_read = efx_mcdi_mtd_read, | |
4390 | .mtd_erase = efx_mcdi_mtd_erase, | |
4391 | .mtd_write = efx_mcdi_mtd_write, | |
4392 | .mtd_sync = efx_mcdi_mtd_sync, | |
4393 | #endif | |
4394 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | |
4395 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, | |
4396 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, | |
4397 | .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, | |
4398 | .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, | |
4399 | .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, | |
4400 | .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, | |
4401 | #ifdef CONFIG_SFC_SRIOV | |
4402 | /* Currently set to the VF versions of these functions |
4403 | * because SRIOV will be re-implemented later. |
4404 | */ | |
4405 | .vswitching_probe = efx_ef10_vswitching_probe_vf, | |
4406 | .vswitching_restore = efx_ef10_vswitching_restore_vf, | |
4407 | .vswitching_remove = efx_ef10_vswitching_remove_vf, | |
4408 | #endif | |
4409 | .get_mac_address = efx_ef10_get_mac_address_pf, | |
4410 | .set_mac_address = efx_ef10_set_mac_address, | |
4411 | .tso_versions = efx_ef10_tso_versions, | |
4412 | ||
4413 | .get_phys_port_id = efx_ef10_get_phys_port_id, | |
4414 | .revision = EFX_REV_X4, | |
4415 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
4416 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
4417 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
4418 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, | |
4419 | .can_rx_scatter = true, | |
4420 | .always_rx_scatter = true, | |
4421 | .option_descriptors = true, | |
5726a154 | 4422 | .flash_auto_partition = true, |
cf06766f JC |
4423 | .min_interrupt_mode = EFX_INT_MODE_MSIX, |
4424 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, | |
4425 | .offload_features = EF10_OFFLOAD_FEATURES, | |
4426 | .mcdi_max_ver = 2, | |
4427 | .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, | |
4428 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | | |
4429 | 1 << HWTSTAMP_FILTER_ALL, | |
4430 | .check_caps = ef10_check_caps, | |
4431 | .print_additional_fwver = efx_ef10_print_additional_fwver, | |
4432 | .sensor_event = efx_mcdi_sensor_event, | |
4433 | .rx_recycle_ring_size = efx_ef10_recycle_ring_size, | |
4434 | }; | |
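
All three tables are bound to hardware the same way: in the usual sfc probe pattern, the driver's PCI ID table (which lives in efx.c, not this file) carries a pointer to the matching efx_nic_type in each entry's driver_data, and probe installs it as efx->type. A sketch under that assumption; the IDs below are examples only (0x1924 is the Solarflare vendor ID, 0x0903 the SFC9120 PF):

/* Illustrative only: binding a PCI ID to one of the ops tables above.
 * The real table and probe logic live in efx.c.
 */
#include <linux/pci.h>

static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(0x1924, 0x0903),	/* SFC9120 PF -> efx_hunt_a0_nic_type */
	  .driver_data = (unsigned long)&efx_hunt_a0_nic_type },
	{ 0 }	/* terminator */
};

/* At probe time the core would then do, in effect:
 *	efx->type = (const struct efx_nic_type *)id->driver_data;
 * after which every hardware call dispatches through that table.
 */
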
4435 |