Commit | Line | Data |
---|---|---|
8127d661 BH |
1 | /**************************************************************************** |
2 | * Driver for Solarflare network controllers and boards | |
3 | * Copyright 2012-2013 Solarflare Communications Inc. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | #include "net_driver.h" | |
11 | #include "ef10_regs.h" | |
12 | #include "io.h" | |
13 | #include "mcdi.h" | |
14 | #include "mcdi_pcol.h" | |
15 | #include "nic.h" | |
16 | #include "workarounds.h" | |
17 | #include <linux/in.h> | |
18 | #include <linux/jhash.h> | |
19 | #include <linux/wait.h> | |
20 | #include <linux/workqueue.h> | |
21 | ||
22 | /* Hardware control for EF10 architecture including 'Huntington'. */ | |
23 | ||
24 | #define EFX_EF10_DRVGEN_EV 7 | |
25 | enum { | |
26 | EFX_EF10_TEST = 1, | |
27 | EFX_EF10_REFILL, | |
28 | }; | |
29 | ||
30 | /* The reserved RSS context value */ | |
31 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff | |
32 | ||
33 | /* The filter table(s) are managed by firmware and we have write-only | |
34 | * access. When removing filters we must identify them to the | |
35 | * firmware by a 64-bit handle, but this is too wide for Linux kernel | |
36 | * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to | |
37 | * be able to tell in advance whether a requested insertion will | |
38 | * replace an existing filter. Therefore we maintain a software hash | |
39 | * table, which should be at least as large as the hardware hash | |
40 | * table. | |
41 | * | |
42 | * Huntington has a single 8K filter table shared between all filter | |
43 | * types and both ports. | |
44 | */ | |
45 | #define HUNT_FILTER_TBL_ROWS 8192 | |
46 | ||
47 | struct efx_ef10_filter_table { | |
48 | /* The RX match field masks supported by this fw & hw, in order of priority */ | |
49 | enum efx_filter_match_flags rx_match_flags[ | |
50 | MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; | |
51 | unsigned int rx_match_count; | |
52 | ||
53 | struct { | |
54 | unsigned long spec; /* pointer to spec plus flag bits */ | |
55 | /* BUSY flag indicates that an update is in progress. STACK_OLD is | |
56 | * used to mark and sweep stack-owned MAC filters. | |
57 | */ | |
58 | #define EFX_EF10_FILTER_FLAG_BUSY 1UL | |
59 | #define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL | |
60 | #define EFX_EF10_FILTER_FLAGS 3UL | |
61 | u64 handle; /* firmware handle */ | |
62 | } *entry; | |
63 | wait_queue_head_t waitq; | |
64 | /* Shadow of net_device address lists, guarded by mac_lock */ | |
65 | #define EFX_EF10_FILTER_STACK_UC_MAX 32 | |
66 | #define EFX_EF10_FILTER_STACK_MC_MAX 256 | |
67 | struct { | |
68 | u8 addr[ETH_ALEN]; | |
69 | u16 id; | |
70 | } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX], | |
71 | stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX]; | |
72 | int stack_uc_count; /* negative for PROMISC */ | |
73 | int stack_mc_count; /* negative for PROMISC/ALLMULTI */ | |
74 | }; | |
75 | ||
76 | /* An arbitrary search limit for the software hash table */ | |
77 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 | |
78 | ||
79 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx); | |
80 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); | |
81 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); | |
82 | ||
83 | static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) | |
84 | { | |
85 | efx_dword_t reg; | |
86 | ||
87 | efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); | |
88 | return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? | |
89 | EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; | |
90 | } | |
91 | ||
92 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) | |
93 | { | |
94 | return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); | |
95 | } | |
96 | ||
97 | static int efx_ef10_init_capabilities(struct efx_nic *efx) | |
98 | { | |
99 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); | |
100 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
101 | size_t outlen; | |
102 | int rc; | |
103 | ||
104 | BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); | |
105 | ||
106 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, | |
107 | outbuf, sizeof(outbuf), &outlen); | |
108 | if (rc) | |
109 | return rc; | |
110 | ||
111 | if (outlen >= sizeof(outbuf)) { | |
112 | nic_data->datapath_caps = | |
113 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); | |
114 | if (!(nic_data->datapath_caps & | |
115 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { | |
116 | netif_err(efx, drv, efx->net_dev, | |
117 | "Capabilities don't indicate TSO support.\n"); | |
118 | return -ENODEV; | |
119 | } | |
120 | } | |
121 | ||
122 | return 0; | |
123 | } | |
124 | ||
125 | static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) | |
126 | { | |
127 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); | |
128 | int rc; | |
129 | ||
130 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, | |
131 | outbuf, sizeof(outbuf), NULL); | |
132 | if (rc) | |
133 | return rc; | |
134 | rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); | |
135 | return rc > 0 ? rc : -ERANGE; | |
136 | } | |
137 | ||
138 | static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) | |
139 | { | |
140 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); | |
141 | size_t outlen; | |
142 | int rc; | |
143 | ||
144 | BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); | |
145 | ||
146 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, | |
147 | outbuf, sizeof(outbuf), &outlen); | |
148 | if (rc) | |
149 | return rc; | |
150 | if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) | |
151 | return -EIO; | |
152 | ||
153 | memcpy(mac_address, | |
154 | MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); | |
155 | return 0; | |
156 | } | |
157 | ||
158 | static int efx_ef10_probe(struct efx_nic *efx) | |
159 | { | |
160 | struct efx_ef10_nic_data *nic_data; | |
161 | int i, rc; | |
162 | ||
163 | /* We can have one VI for each 8K region. However we need | |
164 | * multiple TX queues per channel. | |
165 | */ | |
166 | efx->max_channels = | |
167 | min_t(unsigned int, | |
168 | EFX_MAX_CHANNELS, | |
169 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / | |
170 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); | |
171 | BUG_ON(efx->max_channels == 0); | |
172 | ||
173 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | |
174 | if (!nic_data) | |
175 | return -ENOMEM; | |
176 | efx->nic_data = nic_data; | |
177 | ||
178 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, | |
179 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); | |
180 | if (rc) | |
181 | goto fail1; | |
182 | ||
183 | /* Get the MC's warm boot count. In case it's rebooting right | |
184 | * now, be prepared to retry. | |
185 | */ | |
186 | i = 0; | |
187 | for (;;) { | |
188 | rc = efx_ef10_get_warm_boot_count(efx); | |
189 | if (rc >= 0) | |
190 | break; | |
191 | if (++i == 5) | |
192 | goto fail2; | |
193 | ssleep(1); | |
194 | } | |
195 | nic_data->warm_boot_count = rc; | |
196 | ||
197 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
198 | ||
199 | /* In case we're recovering from a crash (kexec), we want to | |
200 | * cancel any outstanding request by the previous user of this | |
201 | * function. We send a special message using the least | |
202 | * significant bits of the 'high' (doorbell) register. | |
203 | */ | |
204 | _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); | |
205 | ||
206 | rc = efx_mcdi_init(efx); | |
207 | if (rc) | |
208 | goto fail2; | |
209 | ||
210 | /* Reset (most) configuration for this function */ | |
211 | rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); | |
212 | if (rc) | |
213 | goto fail3; | |
214 | ||
215 | /* Enable event logging */ | |
216 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | |
217 | if (rc) | |
218 | goto fail3; | |
219 | ||
220 | rc = efx_ef10_init_capabilities(efx); | |
221 | if (rc < 0) | |
222 | goto fail3; | |
223 | ||
224 | efx->rx_packet_len_offset = | |
225 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; | |
226 | ||
227 | if (!(nic_data->datapath_caps & | |
228 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { | |
229 | netif_err(efx, probe, efx->net_dev, | |
230 | "current firmware does not support an RX prefix\n"); | |
231 | rc = -ENODEV; | |
232 | goto fail3; | |
233 | } | |
234 | ||
235 | rc = efx_mcdi_port_get_number(efx); | |
236 | if (rc < 0) | |
237 | goto fail3; | |
238 | efx->port_num = rc; | |
239 | ||
240 | rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); | |
241 | if (rc) | |
242 | goto fail3; | |
243 | ||
244 | rc = efx_ef10_get_sysclk_freq(efx); | |
245 | if (rc < 0) | |
246 | goto fail3; | |
247 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ | |
248 | ||
249 | /* Check whether firmware supports bug 35388 workaround */ | |
250 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); | |
251 | if (rc == 0) | |
252 | nic_data->workaround_35388 = true; | |
253 | else if (rc != -ENOSYS && rc != -ENOENT) | |
254 | goto fail3; | |
255 | netif_dbg(efx, probe, efx->net_dev, | |
256 | "workaround for bug 35388 is %sabled\n", | |
257 | nic_data->workaround_35388 ? "en" : "dis"); | |
258 | ||
259 | rc = efx_mcdi_mon_probe(efx); | |
260 | if (rc) | |
261 | goto fail3; | |
262 | ||
263 | efx_ptp_probe(efx); | |
264 | ||
265 | return 0; | |
266 | ||
267 | fail3: | |
268 | efx_mcdi_fini(efx); | |
269 | fail2: | |
270 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
271 | fail1: | |
272 | kfree(nic_data); | |
273 | efx->nic_data = NULL; | |
274 | return rc; | |
275 | } | |
276 | ||
277 | static int efx_ef10_free_vis(struct efx_nic *efx) | |
278 | { | |
279 | int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL); | |
280 | ||
281 | /* -EALREADY means nothing to free, so ignore */ | |
282 | if (rc == -EALREADY) | |
283 | rc = 0; | |
284 | return rc; | |
285 | } | |
286 | ||
287 | static void efx_ef10_remove(struct efx_nic *efx) | |
288 | { | |
289 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
290 | int rc; | |
291 | ||
292 | efx_mcdi_mon_remove(efx); | |
293 | ||
294 | /* This needs to be after efx_ptp_remove_channel() with no filters */ | |
295 | efx_ef10_rx_free_indir_table(efx); | |
296 | ||
297 | rc = efx_ef10_free_vis(efx); | |
298 | WARN_ON(rc != 0); | |
299 | ||
300 | efx_mcdi_fini(efx); | |
301 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
302 | kfree(nic_data); | |
303 | } | |
304 | ||
305 | static int efx_ef10_alloc_vis(struct efx_nic *efx, | |
306 | unsigned int min_vis, unsigned int max_vis) | |
307 | { | |
308 | MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); | |
309 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); | |
310 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
311 | size_t outlen; | |
312 | int rc; | |
313 | ||
314 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); | |
315 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); | |
316 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), | |
317 | outbuf, sizeof(outbuf), &outlen); | |
318 | if (rc != 0) | |
319 | return rc; | |
320 | ||
321 | if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) | |
322 | return -EIO; | |
323 | ||
324 | netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", | |
325 | MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); | |
326 | ||
327 | nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); | |
328 | nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); | |
329 | return 0; | |
330 | } | |
331 | ||
332 | static int efx_ef10_dimension_resources(struct efx_nic *efx) | |
333 | { | |
334 | unsigned int n_vis = | |
335 | max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | |
336 | ||
337 | return efx_ef10_alloc_vis(efx, n_vis, n_vis); | |
338 | } | |
339 | ||
340 | static int efx_ef10_init_nic(struct efx_nic *efx) | |
341 | { | |
342 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
343 | int rc; | |
344 | ||
345 | if (nic_data->must_realloc_vis) { | |
346 | /* We cannot let the number of VIs change now */ | |
347 | rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, | |
348 | nic_data->n_allocated_vis); | |
349 | if (rc) | |
350 | return rc; | |
351 | nic_data->must_realloc_vis = false; | |
352 | } | |
353 | ||
354 | efx_ef10_rx_push_indir_table(efx); | |
355 | return 0; | |
356 | } | |
357 | ||
358 | static int efx_ef10_map_reset_flags(u32 *flags) | |
359 | { | |
360 | enum { | |
361 | EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << | |
362 | ETH_RESET_SHARED_SHIFT), | |
363 | EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | | |
364 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | | |
365 | ETH_RESET_PHY | ETH_RESET_MGMT) << | |
366 | ETH_RESET_SHARED_SHIFT) | |
367 | }; | |
368 | ||
369 | /* We assume for now that our PCI function is permitted to | |
370 | * reset everything. | |
371 | */ | |
372 | ||
373 | if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { | |
374 | *flags &= ~EF10_RESET_MC; | |
375 | return RESET_TYPE_WORLD; | |
376 | } | |
377 | ||
378 | if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { | |
379 | *flags &= ~EF10_RESET_PORT; | |
380 | return RESET_TYPE_ALL; | |
381 | } | |
382 | ||
383 | /* no invisible reset implemented */ | |
384 | ||
385 | return -EINVAL; | |
386 | } | |
387 | ||
388 | #define EF10_DMA_STAT(ext_name, mcdi_name) \ | |
389 | [EF10_STAT_ ## ext_name] = \ | |
390 | { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
391 | #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ | |
392 | [EF10_STAT_ ## int_name] = \ | |
393 | { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
394 | #define EF10_OTHER_STAT(ext_name) \ | |
395 | [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } | |
396 | ||
397 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { | |
398 | EF10_DMA_STAT(tx_bytes, TX_BYTES), | |
399 | EF10_DMA_STAT(tx_packets, TX_PKTS), | |
400 | EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), | |
401 | EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), | |
402 | EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), | |
403 | EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), | |
404 | EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), | |
405 | EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), | |
406 | EF10_DMA_STAT(tx_64, TX_64_PKTS), | |
407 | EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), | |
408 | EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), | |
409 | EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), | |
410 | EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), | |
411 | EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), | |
412 | EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), | |
413 | EF10_DMA_STAT(rx_bytes, RX_BYTES), | |
414 | EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), | |
415 | EF10_OTHER_STAT(rx_good_bytes), | |
416 | EF10_OTHER_STAT(rx_bad_bytes), | |
417 | EF10_DMA_STAT(rx_packets, RX_PKTS), | |
418 | EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), | |
419 | EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), | |
420 | EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), | |
421 | EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), | |
422 | EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), | |
423 | EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), | |
424 | EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), | |
425 | EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), | |
426 | EF10_DMA_STAT(rx_64, RX_64_PKTS), | |
427 | EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), | |
428 | EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), | |
429 | EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), | |
430 | EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), | |
431 | EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), | |
432 | EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), | |
433 | EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), | |
434 | EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), | |
435 | EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), | |
436 | EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), | |
437 | EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), | |
438 | EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), | |
439 | }; | |
440 | ||
441 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ | |
442 | (1ULL << EF10_STAT_tx_packets) | \ | |
443 | (1ULL << EF10_STAT_tx_pause) | \ | |
444 | (1ULL << EF10_STAT_tx_unicast) | \ | |
445 | (1ULL << EF10_STAT_tx_multicast) | \ | |
446 | (1ULL << EF10_STAT_tx_broadcast) | \ | |
447 | (1ULL << EF10_STAT_rx_bytes) | \ | |
448 | (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ | |
449 | (1ULL << EF10_STAT_rx_good_bytes) | \ | |
450 | (1ULL << EF10_STAT_rx_bad_bytes) | \ | |
451 | (1ULL << EF10_STAT_rx_packets) | \ | |
452 | (1ULL << EF10_STAT_rx_good) | \ | |
453 | (1ULL << EF10_STAT_rx_bad) | \ | |
454 | (1ULL << EF10_STAT_rx_pause) | \ | |
455 | (1ULL << EF10_STAT_rx_control) | \ | |
456 | (1ULL << EF10_STAT_rx_unicast) | \ | |
457 | (1ULL << EF10_STAT_rx_multicast) | \ | |
458 | (1ULL << EF10_STAT_rx_broadcast) | \ | |
459 | (1ULL << EF10_STAT_rx_lt64) | \ | |
460 | (1ULL << EF10_STAT_rx_64) | \ | |
461 | (1ULL << EF10_STAT_rx_65_to_127) | \ | |
462 | (1ULL << EF10_STAT_rx_128_to_255) | \ | |
463 | (1ULL << EF10_STAT_rx_256_to_511) | \ | |
464 | (1ULL << EF10_STAT_rx_512_to_1023) | \ | |
465 | (1ULL << EF10_STAT_rx_1024_to_15xx) | \ | |
466 | (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ | |
467 | (1ULL << EF10_STAT_rx_gtjumbo) | \ | |
468 | (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ | |
469 | (1ULL << EF10_STAT_rx_overflow) | \ | |
470 | (1ULL << EF10_STAT_rx_nodesc_drops)) | |
471 | ||
472 | /* These statistics are only provided by the 10G MAC. For a 10G/40G | |
473 | * switchable port we do not expose these because they might not | |
474 | * include all the packets they should. | |
475 | */ | |
476 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ | |
477 | (1ULL << EF10_STAT_tx_lt64) | \ | |
478 | (1ULL << EF10_STAT_tx_64) | \ | |
479 | (1ULL << EF10_STAT_tx_65_to_127) | \ | |
480 | (1ULL << EF10_STAT_tx_128_to_255) | \ | |
481 | (1ULL << EF10_STAT_tx_256_to_511) | \ | |
482 | (1ULL << EF10_STAT_tx_512_to_1023) | \ | |
483 | (1ULL << EF10_STAT_tx_1024_to_15xx) | \ | |
484 | (1ULL << EF10_STAT_tx_15xx_to_jumbo)) | |
485 | ||
486 | /* These statistics are only provided by the 40G MAC. For a 10G/40G | |
487 | * switchable port we do expose these because the errors will otherwise | |
488 | * be silent. | |
489 | */ | |
490 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ | |
491 | (1ULL << EF10_STAT_rx_length_error)) | |
492 | ||
493 | #if BITS_PER_LONG == 64 | |
494 | #define STAT_MASK_BITMAP(bits) (bits) | |
495 | #else | |
496 | #define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 | |
497 | #endif | |
498 | ||
499 | static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) | |
500 | { | |
501 | static const unsigned long hunt_40g_stat_mask[] = { | |
502 | STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | | |
503 | HUNT_40G_EXTRA_STAT_MASK) | |
504 | }; | |
505 | static const unsigned long hunt_10g_only_stat_mask[] = { | |
506 | STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | | |
507 | HUNT_10G_ONLY_STAT_MASK) | |
508 | }; | |
509 | u32 port_caps = efx_mcdi_phy_get_caps(efx); | |
510 | ||
511 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | |
512 | return hunt_40g_stat_mask; | |
513 | else | |
514 | return hunt_10g_only_stat_mask; | |
515 | } | |
516 | ||
517 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) | |
518 | { | |
519 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, | |
520 | efx_ef10_stat_mask(efx), names); | |
521 | } | |
522 | ||
523 | static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) | |
524 | { | |
525 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
526 | const unsigned long *stats_mask = efx_ef10_stat_mask(efx); | |
527 | __le64 generation_start, generation_end; | |
528 | u64 *stats = nic_data->stats; | |
529 | __le64 *dma_stats; | |
530 | ||
531 | dma_stats = efx->stats_buffer.addr; | |
532 | nic_data = efx->nic_data; | |
533 | ||
534 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | |
535 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) | |
536 | return 0; | |
537 | rmb(); | |
538 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, | |
539 | stats, efx->stats_buffer.addr, false); | |
540 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | |
541 | if (generation_end != generation_start) | |
542 | return -EAGAIN; | |
543 | ||
544 | /* Update derived statistics */ | |
545 | stats[EF10_STAT_rx_good_bytes] = | |
546 | stats[EF10_STAT_rx_bytes] - | |
547 | stats[EF10_STAT_rx_bytes_minus_good_bytes]; | |
548 | efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], | |
549 | stats[EF10_STAT_rx_bytes_minus_good_bytes]); | |
550 | ||
551 | return 0; | |
552 | } | |
553 | ||
554 | ||
555 | static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, | |
556 | struct rtnl_link_stats64 *core_stats) | |
557 | { | |
558 | const unsigned long *mask = efx_ef10_stat_mask(efx); | |
559 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
560 | u64 *stats = nic_data->stats; | |
561 | size_t stats_count = 0, index; | |
562 | int retry; | |
563 | ||
564 | /* If we're unlucky enough to read statistics during the DMA, wait | |
565 | * up to 10ms for it to finish (typically takes <500us) | |
566 | */ | |
567 | for (retry = 0; retry < 100; ++retry) { | |
568 | if (efx_ef10_try_update_nic_stats(efx) == 0) | |
569 | break; | |
570 | udelay(100); | |
571 | } | |
572 | ||
573 | if (full_stats) { | |
574 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | |
575 | if (efx_ef10_stat_desc[index].name) { | |
576 | *full_stats++ = stats[index]; | |
577 | ++stats_count; | |
578 | } | |
579 | } | |
580 | } | |
581 | ||
582 | if (core_stats) { | |
583 | core_stats->rx_packets = stats[EF10_STAT_rx_packets]; | |
584 | core_stats->tx_packets = stats[EF10_STAT_tx_packets]; | |
585 | core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; | |
586 | core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; | |
587 | core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops]; | |
588 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; | |
589 | core_stats->rx_length_errors = | |
590 | stats[EF10_STAT_rx_gtjumbo] + | |
591 | stats[EF10_STAT_rx_length_error]; | |
592 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | |
593 | core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; | |
594 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | |
595 | core_stats->rx_errors = (core_stats->rx_length_errors + | |
596 | core_stats->rx_crc_errors + | |
597 | core_stats->rx_frame_errors); | |
598 | } | |
599 | ||
600 | return stats_count; | |
601 | } | |
602 | ||
603 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) | |
604 | { | |
605 | struct efx_nic *efx = channel->efx; | |
606 | unsigned int mode, value; | |
607 | efx_dword_t timer_cmd; | |
608 | ||
609 | if (channel->irq_moderation) { | |
610 | mode = 3; | |
611 | value = channel->irq_moderation - 1; | |
612 | } else { | |
613 | mode = 0; | |
614 | value = 0; | |
615 | } | |
616 | ||
617 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
618 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, | |
619 | EFE_DD_EVQ_IND_TIMER_FLAGS, | |
620 | ERF_DD_EVQ_IND_TIMER_MODE, mode, | |
621 | ERF_DD_EVQ_IND_TIMER_VAL, value); | |
622 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, | |
623 | channel->channel); | |
624 | } else { | |
625 | EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, | |
626 | ERF_DZ_TC_TIMER_VAL, value); | |
627 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, | |
628 | channel->channel); | |
629 | } | |
630 | } | |
631 | ||
632 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | |
633 | { | |
634 | wol->supported = 0; | |
635 | wol->wolopts = 0; | |
636 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | |
637 | } | |
638 | ||
639 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) | |
640 | { | |
641 | if (type != 0) | |
642 | return -EINVAL; | |
643 | return 0; | |
644 | } | |
645 | ||
646 | static void efx_ef10_mcdi_request(struct efx_nic *efx, | |
647 | const efx_dword_t *hdr, size_t hdr_len, | |
648 | const efx_dword_t *sdu, size_t sdu_len) | |
649 | { | |
650 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
651 | u8 *pdu = nic_data->mcdi_buf.addr; | |
652 | ||
653 | memcpy(pdu, hdr, hdr_len); | |
654 | memcpy(pdu + hdr_len, sdu, sdu_len); | |
655 | wmb(); | |
656 | ||
657 | /* The hardware provides 'low' and 'high' (doorbell) registers | |
658 | * for passing the 64-bit address of an MCDI request to | |
659 | * firmware. However the dwords are swapped by firmware. The | |
660 | * least significant bits of the doorbell are then 0 for all | |
661 | * MCDI requests due to alignment. | |
662 | */ | |
663 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), | |
664 | ER_DZ_MC_DB_LWRD); | |
665 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), | |
666 | ER_DZ_MC_DB_HWRD); | |
667 | } | |
668 | ||
669 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) | |
670 | { | |
671 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
672 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; | |
673 | ||
674 | rmb(); | |
675 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); | |
676 | } | |
677 | ||
678 | static void | |
679 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, | |
680 | size_t offset, size_t outlen) | |
681 | { | |
682 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
683 | const u8 *pdu = nic_data->mcdi_buf.addr; | |
684 | ||
685 | memcpy(outbuf, pdu + offset, outlen); | |
686 | } | |
687 | ||
688 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) | |
689 | { | |
690 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
691 | int rc; | |
692 | ||
693 | rc = efx_ef10_get_warm_boot_count(efx); | |
694 | if (rc < 0) { | |
695 | /* The firmware is presumably in the process of | |
696 | * rebooting. However, we are supposed to report each | |
697 | * reboot just once, so we must only do that once we | |
698 | * can read and store the updated warm boot count. | |
699 | */ | |
700 | return 0; | |
701 | } | |
702 | ||
703 | if (rc == nic_data->warm_boot_count) | |
704 | return 0; | |
705 | ||
706 | nic_data->warm_boot_count = rc; | |
707 | ||
708 | /* All our allocations have been reset */ | |
709 | nic_data->must_realloc_vis = true; | |
710 | nic_data->must_restore_filters = true; | |
711 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
712 | ||
713 | return -EIO; | |
714 | } | |
715 | ||
716 | /* Handle an MSI interrupt | |
717 | * | |
718 | * Handle an MSI hardware interrupt. This routine schedules event | |
719 | * queue processing. No interrupt acknowledgement cycle is necessary. | |
720 | * Also, we never need to check that the interrupt is for us, since | |
721 | * MSI interrupts cannot be shared. | |
722 | */ | |
723 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |
724 | { | |
725 | struct efx_msi_context *context = dev_id; | |
726 | struct efx_nic *efx = context->efx; | |
727 | ||
728 | netif_vdbg(efx, intr, efx->net_dev, | |
729 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | |
730 | ||
731 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | |
732 | /* Note test interrupts */ | |
733 | if (context->index == efx->irq_level) | |
734 | efx->last_irq_cpu = raw_smp_processor_id(); | |
735 | ||
736 | /* Schedule processing of the channel */ | |
737 | efx_schedule_channel_irq(efx->channel[context->index]); | |
738 | } | |
739 | ||
740 | return IRQ_HANDLED; | |
741 | } | |
742 | ||
743 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | |
744 | { | |
745 | struct efx_nic *efx = dev_id; | |
746 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | |
747 | struct efx_channel *channel; | |
748 | efx_dword_t reg; | |
749 | u32 queues; | |
750 | ||
751 | /* Read the ISR which also ACKs the interrupts */ | |
752 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); | |
753 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); | |
754 | ||
755 | if (queues == 0) | |
756 | return IRQ_NONE; | |
757 | ||
758 | if (likely(soft_enabled)) { | |
759 | /* Note test interrupts */ | |
760 | if (queues & (1U << efx->irq_level)) | |
761 | efx->last_irq_cpu = raw_smp_processor_id(); | |
762 | ||
763 | efx_for_each_channel(channel, efx) { | |
764 | if (queues & 1) | |
765 | efx_schedule_channel_irq(channel); | |
766 | queues >>= 1; | |
767 | } | |
768 | } | |
769 | ||
770 | netif_vdbg(efx, intr, efx->net_dev, | |
771 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | |
772 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | |
773 | ||
774 | return IRQ_HANDLED; | |
775 | } | |
776 | ||
777 | static void efx_ef10_irq_test_generate(struct efx_nic *efx) | |
778 | { | |
779 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); | |
780 | ||
781 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); | |
782 | ||
783 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); | |
784 | (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, | |
785 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
786 | } | |
787 | ||
788 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) | |
789 | { | |
790 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, | |
791 | (tx_queue->ptr_mask + 1) * | |
792 | sizeof(efx_qword_t), | |
793 | GFP_KERNEL); | |
794 | } | |
795 | ||
796 | /* This writes to the TX_DESC_WPTR and also pushes data */ | |
797 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, | |
798 | const efx_qword_t *txd) | |
799 | { | |
800 | unsigned int write_ptr; | |
801 | efx_oword_t reg; | |
802 | ||
803 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
804 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); | |
805 | reg.qword[0] = *txd; | |
806 | efx_writeo_page(tx_queue->efx, ®, | |
807 | ER_DZ_TX_DESC_UPD, tx_queue->queue); | |
808 | } | |
809 | ||
810 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) | |
811 | { | |
812 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
813 | EFX_BUF_SIZE)); | |
814 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); | |
815 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | |
816 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; | |
817 | struct efx_channel *channel = tx_queue->channel; | |
818 | struct efx_nic *efx = tx_queue->efx; | |
819 | size_t inlen, outlen; | |
820 | dma_addr_t dma_addr; | |
821 | efx_qword_t *txd; | |
822 | int rc; | |
823 | int i; | |
824 | ||
825 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); | |
826 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); | |
827 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); | |
828 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); | |
829 | MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, | |
830 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, | |
831 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); | |
832 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); | |
833 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
834 | ||
835 | dma_addr = tx_queue->txd.buf.dma_addr; | |
836 | ||
837 | netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", | |
838 | tx_queue->queue, entries, (u64)dma_addr); | |
839 | ||
840 | for (i = 0; i < entries; ++i) { | |
841 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); | |
842 | dma_addr += EFX_BUF_SIZE; | |
843 | } | |
844 | ||
845 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); | |
846 | ||
847 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, | |
848 | outbuf, sizeof(outbuf), &outlen); | |
849 | if (rc) | |
850 | goto fail; | |
851 | ||
852 | /* A previous user of this TX queue might have set us up the | |
853 | * bomb by writing a descriptor to the TX push collector but | |
854 | * not the doorbell. (Each collector belongs to a port, not a | |
855 | * queue or function, so cannot easily be reset.) We must | |
856 | * attempt to push a no-op descriptor in its place. | |
857 | */ | |
858 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; | |
859 | tx_queue->insert_count = 1; | |
860 | txd = efx_tx_desc(tx_queue, 0); | |
861 | EFX_POPULATE_QWORD_4(*txd, | |
862 | ESF_DZ_TX_DESC_IS_OPT, true, | |
863 | ESF_DZ_TX_OPTION_TYPE, | |
864 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, | |
865 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, | |
866 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); | |
867 | tx_queue->write_count = 1; | |
868 | wmb(); | |
869 | efx_ef10_push_tx_desc(tx_queue, txd); | |
870 | ||
871 | return; | |
872 | ||
873 | fail: | |
874 | WARN_ON(true); | |
875 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
876 | } | |
877 | ||
878 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) | |
879 | { | |
880 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); | |
881 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); | |
882 | struct efx_nic *efx = tx_queue->efx; | |
883 | size_t outlen; | |
884 | int rc; | |
885 | ||
886 | MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, | |
887 | tx_queue->queue); | |
888 | ||
889 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), | |
890 | outbuf, sizeof(outbuf), &outlen); | |
891 | ||
892 | if (rc && rc != -EALREADY) | |
893 | goto fail; | |
894 | ||
895 | return; | |
896 | ||
897 | fail: | |
898 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
899 | } | |
900 | ||
901 | static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) | |
902 | { | |
903 | efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); | |
904 | } | |
905 | ||
906 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | |
907 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) | |
908 | { | |
909 | unsigned int write_ptr; | |
910 | efx_dword_t reg; | |
911 | ||
912 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
913 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); | |
914 | efx_writed_page(tx_queue->efx, ®, | |
915 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); | |
916 | } | |
917 | ||
918 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) | |
919 | { | |
920 | unsigned int old_write_count = tx_queue->write_count; | |
921 | struct efx_tx_buffer *buffer; | |
922 | unsigned int write_ptr; | |
923 | efx_qword_t *txd; | |
924 | ||
925 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | |
926 | ||
927 | do { | |
928 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
929 | buffer = &tx_queue->buffer[write_ptr]; | |
930 | txd = efx_tx_desc(tx_queue, write_ptr); | |
931 | ++tx_queue->write_count; | |
932 | ||
933 | /* Create TX descriptor ring entry */ | |
934 | if (buffer->flags & EFX_TX_BUF_OPTION) { | |
935 | *txd = buffer->option; | |
936 | } else { | |
937 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | |
938 | EFX_POPULATE_QWORD_3( | |
939 | *txd, | |
940 | ESF_DZ_TX_KER_CONT, | |
941 | buffer->flags & EFX_TX_BUF_CONT, | |
942 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, | |
943 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); | |
944 | } | |
945 | } while (tx_queue->write_count != tx_queue->insert_count); | |
946 | ||
947 | wmb(); /* Ensure descriptors are written before they are fetched */ | |
948 | ||
949 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | |
950 | txd = efx_tx_desc(tx_queue, | |
951 | old_write_count & tx_queue->ptr_mask); | |
952 | efx_ef10_push_tx_desc(tx_queue, txd); | |
953 | ++tx_queue->pushes; | |
954 | } else { | |
955 | efx_ef10_notify_tx_desc(tx_queue); | |
956 | } | |
957 | } | |
958 | ||
959 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) | |
960 | { | |
961 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); | |
962 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); | |
963 | size_t outlen; | |
964 | int rc; | |
965 | ||
966 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, | |
967 | EVB_PORT_ID_ASSIGNED); | |
968 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, | |
969 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); | |
970 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, | |
971 | EFX_MAX_CHANNELS); | |
972 | ||
973 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), | |
974 | outbuf, sizeof(outbuf), &outlen); | |
975 | if (rc != 0) | |
976 | return rc; | |
977 | ||
978 | if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) | |
979 | return -EIO; | |
980 | ||
981 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); | |
982 | ||
983 | return 0; | |
984 | } | |
985 | ||
986 | static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) | |
987 | { | |
988 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); | |
989 | int rc; | |
990 | ||
991 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, | |
992 | context); | |
993 | ||
994 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), | |
995 | NULL, 0, NULL); | |
996 | WARN_ON(rc != 0); | |
997 | } | |
998 | ||
999 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) | |
1000 | { | |
1001 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); | |
1002 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); | |
1003 | int i, rc; | |
1004 | ||
1005 | MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, | |
1006 | context); | |
1007 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | |
1008 | MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); | |
1009 | ||
1010 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) | |
1011 | MCDI_PTR(tablebuf, | |
1012 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = | |
1013 | (u8) efx->rx_indir_table[i]; | |
1014 | ||
1015 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, | |
1016 | sizeof(tablebuf), NULL, 0, NULL); | |
1017 | if (rc != 0) | |
1018 | return rc; | |
1019 | ||
1020 | MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, | |
1021 | context); | |
1022 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != | |
1023 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); | |
1024 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) | |
1025 | MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = | |
1026 | efx->rx_hash_key[i]; | |
1027 | ||
1028 | return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, | |
1029 | sizeof(keybuf), NULL, 0, NULL); | |
1030 | } | |
1031 | ||
1032 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) | |
1033 | { | |
1034 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1035 | ||
1036 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) | |
1037 | efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); | |
1038 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
1039 | } | |
1040 | ||
1041 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx) | |
1042 | { | |
1043 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1044 | int rc; | |
1045 | ||
1046 | netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n"); | |
1047 | ||
1048 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { | |
1049 | rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); | |
1050 | if (rc != 0) | |
1051 | goto fail; | |
1052 | } | |
1053 | ||
1054 | rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); | |
1055 | if (rc != 0) | |
1056 | goto fail; | |
1057 | ||
1058 | return; | |
1059 | ||
1060 | fail: | |
1061 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1062 | } | |
1063 | ||
1064 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) | |
1065 | { | |
1066 | return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, | |
1067 | (rx_queue->ptr_mask + 1) * | |
1068 | sizeof(efx_qword_t), | |
1069 | GFP_KERNEL); | |
1070 | } | |
1071 | ||
1072 | static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |
1073 | { | |
1074 | MCDI_DECLARE_BUF(inbuf, | |
1075 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
1076 | EFX_BUF_SIZE)); | |
1077 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); | |
1078 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1079 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; | |
1080 | struct efx_nic *efx = rx_queue->efx; | |
1081 | size_t inlen, outlen; | |
1082 | dma_addr_t dma_addr; | |
1083 | int rc; | |
1084 | int i; | |
1085 | ||
1086 | rx_queue->scatter_n = 0; | |
1087 | rx_queue->scatter_len = 0; | |
1088 | ||
1089 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); | |
1090 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); | |
1091 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); | |
1092 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, | |
1093 | efx_rx_queue_index(rx_queue)); | |
1094 | MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS, | |
1095 | INIT_RXQ_IN_FLAG_PREFIX, 1); | |
1096 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); | |
1097 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1098 | ||
1099 | dma_addr = rx_queue->rxd.buf.dma_addr; | |
1100 | ||
1101 | netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", | |
1102 | efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); | |
1103 | ||
1104 | for (i = 0; i < entries; ++i) { | |
1105 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); | |
1106 | dma_addr += EFX_BUF_SIZE; | |
1107 | } | |
1108 | ||
1109 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); | |
1110 | ||
1111 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, | |
1112 | outbuf, sizeof(outbuf), &outlen); | |
1113 | if (rc) | |
1114 | goto fail; | |
1115 | ||
1116 | return; | |
1117 | ||
1118 | fail: | |
1119 | WARN_ON(true); | |
1120 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1121 | } | |
1122 | ||
1123 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) | |
1124 | { | |
1125 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); | |
1126 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); | |
1127 | struct efx_nic *efx = rx_queue->efx; | |
1128 | size_t outlen; | |
1129 | int rc; | |
1130 | ||
1131 | MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, | |
1132 | efx_rx_queue_index(rx_queue)); | |
1133 | ||
1134 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), | |
1135 | outbuf, sizeof(outbuf), &outlen); | |
1136 | ||
1137 | if (rc && rc != -EALREADY) | |
1138 | goto fail; | |
1139 | ||
1140 | return; | |
1141 | ||
1142 | fail: | |
1143 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1144 | } | |
1145 | ||
1146 | static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) | |
1147 | { | |
1148 | efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); | |
1149 | } | |
1150 | ||
1151 | /* This creates an entry in the RX descriptor queue */ | |
1152 | static inline void | |
1153 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | |
1154 | { | |
1155 | struct efx_rx_buffer *rx_buf; | |
1156 | efx_qword_t *rxd; | |
1157 | ||
1158 | rxd = efx_rx_desc(rx_queue, index); | |
1159 | rx_buf = efx_rx_buffer(rx_queue, index); | |
1160 | EFX_POPULATE_QWORD_2(*rxd, | |
1161 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, | |
1162 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | |
1163 | } | |
1164 | ||
1165 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) | |
1166 | { | |
1167 | struct efx_nic *efx = rx_queue->efx; | |
1168 | unsigned int write_count; | |
1169 | efx_dword_t reg; | |
1170 | ||
1171 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ | |
1172 | write_count = rx_queue->added_count & ~7; | |
1173 | if (rx_queue->notified_count == write_count) | |
1174 | return; | |
1175 | ||
1176 | do | |
1177 | efx_ef10_build_rx_desc( | |
1178 | rx_queue, | |
1179 | rx_queue->notified_count & rx_queue->ptr_mask); | |
1180 | while (++rx_queue->notified_count != write_count); | |
1181 | ||
1182 | wmb(); | |
1183 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, | |
1184 | write_count & rx_queue->ptr_mask); | |
1185 | efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, | |
1186 | efx_rx_queue_index(rx_queue)); | |
1187 | } | |
1188 | ||
1189 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; | |
1190 | ||
1191 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) | |
1192 | { | |
1193 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1194 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
1195 | efx_qword_t event; | |
1196 | ||
1197 | EFX_POPULATE_QWORD_2(event, | |
1198 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
1199 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); | |
1200 | ||
1201 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
1202 | ||
1203 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
1204 | * already swapped the data to little-endian order. | |
1205 | */ | |
1206 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
1207 | sizeof(efx_qword_t)); | |
1208 | ||
1209 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, | |
1210 | inbuf, sizeof(inbuf), 0, | |
1211 | efx_ef10_rx_defer_refill_complete, 0); | |
1212 | } | |
1213 | ||
1214 | static void | |
1215 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, | |
1216 | int rc, efx_dword_t *outbuf, | |
1217 | size_t outlen_actual) | |
1218 | { | |
1219 | /* nothing to do */ | |
1220 | } | |
1221 | ||
1222 | static int efx_ef10_ev_probe(struct efx_channel *channel) | |
1223 | { | |
1224 | return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, | |
1225 | (channel->eventq_mask + 1) * | |
1226 | sizeof(efx_qword_t), | |
1227 | GFP_KERNEL); | |
1228 | } | |
1229 | ||
1230 | static int efx_ef10_ev_init(struct efx_channel *channel) | |
1231 | { | |
1232 | MCDI_DECLARE_BUF(inbuf, | |
1233 | MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / | |
1234 | EFX_BUF_SIZE)); | |
1235 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); | |
1236 | size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; | |
1237 | struct efx_nic *efx = channel->efx; | |
1238 | struct efx_ef10_nic_data *nic_data; | |
1239 | bool supports_rx_merge; | |
1240 | size_t inlen, outlen; | |
1241 | dma_addr_t dma_addr; | |
1242 | int rc; | |
1243 | int i; | |
1244 | ||
1245 | nic_data = efx->nic_data; | |
1246 | supports_rx_merge = | |
1247 | !!(nic_data->datapath_caps & | |
1248 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); | |
1249 | ||
1250 | /* Fill event queue with all ones (i.e. empty events) */ | |
1251 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | |
1252 | ||
1253 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); | |
1254 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); | |
1255 | /* INIT_EVQ expects index in vector table, not absolute */ | |
1256 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); | |
1257 | MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, | |
1258 | INIT_EVQ_IN_FLAG_INTERRUPTING, 1, | |
1259 | INIT_EVQ_IN_FLAG_RX_MERGE, 1, | |
1260 | INIT_EVQ_IN_FLAG_TX_MERGE, 1, | |
1261 | INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); | |
1262 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, | |
1263 | MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); | |
1264 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); | |
1265 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); | |
1266 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, | |
1267 | MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); | |
1268 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); | |
1269 | ||
1270 | dma_addr = channel->eventq.buf.dma_addr; | |
1271 | for (i = 0; i < entries; ++i) { | |
1272 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); | |
1273 | dma_addr += EFX_BUF_SIZE; | |
1274 | } | |
1275 | ||
1276 | inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); | |
1277 | ||
1278 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, | |
1279 | outbuf, sizeof(outbuf), &outlen); | |
1280 | if (rc) | |
1281 | goto fail; | |
1282 | ||
1283 | /* IRQ return is ignored */ | |
1284 | ||
1285 | return 0; | |
1286 | ||
1287 | fail: | |
1288 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1289 | return rc; | |
1290 | } | |
1291 | ||
1292 | static void efx_ef10_ev_fini(struct efx_channel *channel) | |
1293 | { | |
1294 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); | |
1295 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); | |
1296 | struct efx_nic *efx = channel->efx; | |
1297 | size_t outlen; | |
1298 | int rc; | |
1299 | ||
1300 | MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); | |
1301 | ||
1302 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), | |
1303 | outbuf, sizeof(outbuf), &outlen); | |
1304 | ||
1305 | if (rc && rc != -EALREADY) | |
1306 | goto fail; | |
1307 | ||
1308 | return; | |
1309 | ||
1310 | fail: | |
1311 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1312 | } | |
1313 | ||
1314 | static void efx_ef10_ev_remove(struct efx_channel *channel) | |
1315 | { | |
1316 | efx_nic_free_buffer(channel->efx, &channel->eventq.buf); | |
1317 | } | |
1318 | ||
1319 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, | |
1320 | unsigned int rx_queue_label) | |
1321 | { | |
1322 | struct efx_nic *efx = rx_queue->efx; | |
1323 | ||
1324 | netif_info(efx, hw, efx->net_dev, | |
1325 | "rx event arrived on queue %d labeled as queue %u\n", | |
1326 | efx_rx_queue_index(rx_queue), rx_queue_label); | |
1327 | ||
1328 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1329 | } | |
1330 | ||
1331 | static void | |
1332 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, | |
1333 | unsigned int actual, unsigned int expected) | |
1334 | { | |
1335 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; | |
1336 | struct efx_nic *efx = rx_queue->efx; | |
1337 | ||
1338 | netif_info(efx, hw, efx->net_dev, | |
1339 | "dropped %d events (index=%d expected=%d)\n", | |
1340 | dropped, actual, expected); | |
1341 | ||
1342 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1343 | } | |
1344 | ||
1345 | /* partially received RX was aborted. clean up. */ | |
1346 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) | |
1347 | { | |
1348 | unsigned int rx_desc_ptr; | |
1349 | ||
1350 | WARN_ON(rx_queue->scatter_n == 0); | |
1351 | ||
1352 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, | |
1353 | "scattered RX aborted (dropping %u buffers)\n", | |
1354 | rx_queue->scatter_n); | |
1355 | ||
1356 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; | |
1357 | ||
1358 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, | |
1359 | 0, EFX_RX_PKT_DISCARD); | |
1360 | ||
1361 | rx_queue->removed_count += rx_queue->scatter_n; | |
1362 | rx_queue->scatter_n = 0; | |
1363 | rx_queue->scatter_len = 0; | |
1364 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; | |
1365 | } | |
1366 | ||
1367 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |
1368 | const efx_qword_t *event) | |
1369 | { | |
1370 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; | |
1371 | unsigned int n_descs, n_packets, i; | |
1372 | struct efx_nic *efx = channel->efx; | |
1373 | struct efx_rx_queue *rx_queue; | |
1374 | bool rx_cont; | |
1375 | u16 flags = 0; | |
1376 | ||
1377 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1378 | return 0; | |
1379 | ||
1380 | /* Basic packet information */ | |
1381 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); | |
1382 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); | |
1383 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); | |
1384 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); | |
1385 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); | |
1386 | ||
1387 | WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)); | |
1388 | ||
1389 | rx_queue = efx_channel_get_rx_queue(channel); | |
1390 | ||
1391 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) | |
1392 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); | |
1393 | ||
1394 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & | |
1395 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1396 | ||
1397 | if (n_descs != rx_queue->scatter_n + 1) { | |
1398 | /* detect rx abort */ | |
1399 | if (unlikely(n_descs == rx_queue->scatter_n)) { | |
1400 | WARN_ON(rx_bytes != 0); | |
1401 | efx_ef10_handle_rx_abort(rx_queue); | |
1402 | return 0; | |
1403 | } | |
1404 | ||
1405 | if (unlikely(rx_queue->scatter_n != 0)) { | |
1406 | /* Scattered packet completions cannot be | |
1407 | * merged, so something has gone wrong. | |
1408 | */ | |
1409 | efx_ef10_handle_rx_bad_lbits( | |
1410 | rx_queue, next_ptr_lbits, | |
1411 | (rx_queue->removed_count + | |
1412 | rx_queue->scatter_n + 1) & | |
1413 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1414 | return 0; | |
1415 | } | |
1416 | ||
1417 | /* Merged completion for multiple non-scattered packets */ | |
1418 | rx_queue->scatter_n = 1; | |
1419 | rx_queue->scatter_len = 0; | |
1420 | n_packets = n_descs; | |
1421 | ++channel->n_rx_merge_events; | |
1422 | channel->n_rx_merge_packets += n_packets; | |
1423 | flags |= EFX_RX_PKT_PREFIX_LEN; | |
1424 | } else { | |
1425 | ++rx_queue->scatter_n; | |
1426 | rx_queue->scatter_len += rx_bytes; | |
1427 | if (rx_cont) | |
1428 | return 0; | |
1429 | n_packets = 1; | |
1430 | } | |
1431 | ||
1432 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) | |
1433 | flags |= EFX_RX_PKT_DISCARD; | |
1434 | ||
1435 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { | |
1436 | channel->n_rx_ip_hdr_chksum_err += n_packets; | |
1437 | } else if (unlikely(EFX_QWORD_FIELD(*event, | |
1438 | ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { | |
1439 | channel->n_rx_tcp_udp_chksum_err += n_packets; | |
1440 | } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || | |
1441 | rx_l4_class == ESE_DZ_L4_CLASS_UDP) { | |
1442 | flags |= EFX_RX_PKT_CSUMMED; | |
1443 | } | |
1444 | ||
1445 | if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) | |
1446 | flags |= EFX_RX_PKT_TCP; | |
1447 | ||
1448 | channel->irq_mod_score += 2 * n_packets; | |
1449 | ||
1450 | /* Handle received packet(s) */ | |
1451 | for (i = 0; i < n_packets; i++) { | |
1452 | efx_rx_packet(rx_queue, | |
1453 | rx_queue->removed_count & rx_queue->ptr_mask, | |
1454 | rx_queue->scatter_n, rx_queue->scatter_len, | |
1455 | flags); | |
1456 | rx_queue->removed_count += rx_queue->scatter_n; | |
1457 | } | |
1458 | ||
1459 | rx_queue->scatter_n = 0; | |
1460 | rx_queue->scatter_len = 0; | |
1461 | ||
1462 | return n_packets; | |
1463 | } | |
1464 | ||
1465 | static int | |
1466 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |
1467 | { | |
1468 | struct efx_nic *efx = channel->efx; | |
1469 | struct efx_tx_queue *tx_queue; | |
1470 | unsigned int tx_ev_desc_ptr; | |
1471 | unsigned int tx_ev_q_label; | |
1472 | int tx_descs = 0; | |
1473 | ||
1474 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1475 | return 0; | |
1476 | ||
1477 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | |
1478 | return 0; | |
1479 | ||
1480 | /* Transmit completion */ | |
1481 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); | |
1482 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); | |
1483 | tx_queue = efx_channel_get_tx_queue(channel, | |
1484 | tx_ev_q_label % EFX_TXQ_TYPES); | |
1485 | tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & | |
1486 | tx_queue->ptr_mask); | |
1487 | efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); | |
1488 | ||
1489 | return tx_descs; | |
1490 | } | |
1491 | ||
1492 | static void | |
1493 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |
1494 | { | |
1495 | struct efx_nic *efx = channel->efx; | |
1496 | int subcode; | |
1497 | ||
1498 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); | |
1499 | ||
1500 | switch (subcode) { | |
1501 | case ESE_DZ_DRV_TIMER_EV: | |
1502 | case ESE_DZ_DRV_WAKE_UP_EV: | |
1503 | break; | |
1504 | case ESE_DZ_DRV_START_UP_EV: | |
1505 | /* event queue init complete. ok. */ | |
1506 | break; | |
1507 | default: | |
1508 | netif_err(efx, hw, efx->net_dev, | |
1509 | "channel %d unknown driver event type %d" | |
1510 | " (data " EFX_QWORD_FMT ")\n", | |
1511 | channel->channel, subcode, | |
1512 | EFX_QWORD_VAL(*event)); | |
1513 | ||
1514 | } | |
1515 | } | |
1516 | ||
1517 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, | |
1518 | efx_qword_t *event) | |
1519 | { | |
1520 | struct efx_nic *efx = channel->efx; | |
1521 | u32 subcode; | |
1522 | ||
1523 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); | |
1524 | ||
1525 | switch (subcode) { | |
1526 | case EFX_EF10_TEST: | |
1527 | channel->event_test_cpu = raw_smp_processor_id(); | |
1528 | break; | |
1529 | case EFX_EF10_REFILL: | |
1530 | /* The queue must be empty, so we won't receive any rx | |
1531 | * events, so efx_process_channel() won't refill the | |
1532 | * queue. Refill it here | |
1533 | */ | |
1534 | efx_fast_push_rx_descriptors(&channel->rx_queue); | |
1535 | break; | |
1536 | default: | |
1537 | netif_err(efx, hw, efx->net_dev, | |
1538 | "channel %d unknown driver event type %u" | |
1539 | " (data " EFX_QWORD_FMT ")\n", | |
1540 | channel->channel, (unsigned) subcode, | |
1541 | EFX_QWORD_VAL(*event)); | |
1542 | } | |
1543 | } | |
1544 | ||
1545 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) | |
1546 | { | |
1547 | struct efx_nic *efx = channel->efx; | |
1548 | efx_qword_t event, *p_event; | |
1549 | unsigned int read_ptr; | |
1550 | int ev_code; | |
1551 | int tx_descs = 0; | |
1552 | int spent = 0; | |
1553 | ||
1554 | read_ptr = channel->eventq_read_ptr; | |
1555 | ||
1556 | for (;;) { | |
1557 | p_event = efx_event(channel, read_ptr); | |
1558 | event = *p_event; | |
1559 | ||
1560 | if (!efx_event_present(&event)) | |
1561 | break; | |
1562 | ||
1563 | EFX_SET_QWORD(*p_event); | |
1564 | ||
1565 | ++read_ptr; | |
1566 | ||
1567 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); | |
1568 | ||
1569 | netif_vdbg(efx, drv, efx->net_dev, | |
1570 | "processing event on %d " EFX_QWORD_FMT "\n", | |
1571 | channel->channel, EFX_QWORD_VAL(event)); | |
1572 | ||
1573 | switch (ev_code) { | |
1574 | case ESE_DZ_EV_CODE_MCDI_EV: | |
1575 | efx_mcdi_process_event(channel, &event); | |
1576 | break; | |
1577 | case ESE_DZ_EV_CODE_RX_EV: | |
1578 | spent += efx_ef10_handle_rx_event(channel, &event); | |
1579 | if (spent >= quota) { | |
1580 | /* XXX can we split a merged event to | |
1581 | * avoid going over-quota? | |
1582 | */ | |
1583 | spent = quota; | |
1584 | goto out; | |
1585 | } | |
1586 | break; | |
1587 | case ESE_DZ_EV_CODE_TX_EV: | |
1588 | tx_descs += efx_ef10_handle_tx_event(channel, &event); | |
1589 | if (tx_descs > efx->txq_entries) { | |
1590 | spent = quota; | |
1591 | goto out; | |
1592 | } else if (++spent == quota) { | |
1593 | goto out; | |
1594 | } | |
1595 | break; | |
1596 | case ESE_DZ_EV_CODE_DRIVER_EV: | |
1597 | efx_ef10_handle_driver_event(channel, &event); | |
1598 | if (++spent == quota) | |
1599 | goto out; | |
1600 | break; | |
1601 | case EFX_EF10_DRVGEN_EV: | |
1602 | efx_ef10_handle_driver_generated_event(channel, &event); | |
1603 | break; | |
1604 | default: | |
1605 | netif_err(efx, hw, efx->net_dev, | |
1606 | "channel %d unknown event type %d" | |
1607 | " (data " EFX_QWORD_FMT ")\n", | |
1608 | channel->channel, ev_code, | |
1609 | EFX_QWORD_VAL(event)); | |
1610 | } | |
1611 | } | |
1612 | ||
1613 | out: | |
1614 | channel->eventq_read_ptr = read_ptr; | |
1615 | return spent; | |
1616 | } | |
1617 | ||
1618 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) | |
1619 | { | |
1620 | struct efx_nic *efx = channel->efx; | |
1621 | efx_dword_t rptr; | |
1622 | ||
1623 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
1624 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < | |
1625 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
1626 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > | |
1627 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
1628 | ||
1629 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
1630 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, | |
1631 | ERF_DD_EVQ_IND_RPTR, | |
1632 | (channel->eventq_read_ptr & | |
1633 | channel->eventq_mask) >> | |
1634 | ERF_DD_EVQ_IND_RPTR_WIDTH); | |
1635 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
1636 | channel->channel); | |
1637 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
1638 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, | |
1639 | ERF_DD_EVQ_IND_RPTR, | |
1640 | channel->eventq_read_ptr & | |
1641 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); | |
1642 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
1643 | channel->channel); | |
1644 | } else { | |
1645 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, | |
1646 | channel->eventq_read_ptr & | |
1647 | channel->eventq_mask); | |
1648 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); | |
1649 | } | |
1650 | } | |
1651 | ||
1652 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) | |
1653 | { | |
1654 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
1655 | struct efx_nic *efx = channel->efx; | |
1656 | efx_qword_t event; | |
1657 | int rc; | |
1658 | ||
1659 | EFX_POPULATE_QWORD_2(event, | |
1660 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
1661 | ESF_DZ_EV_DATA, EFX_EF10_TEST); | |
1662 | ||
1663 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
1664 | ||
1665 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
1666 | * already swapped the data to little-endian order. | |
1667 | */ | |
1668 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
1669 | sizeof(efx_qword_t)); | |
1670 | ||
1671 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), | |
1672 | NULL, 0, NULL); | |
1673 | if (rc != 0) | |
1674 | goto fail; | |
1675 | ||
1676 | return; | |
1677 | ||
1678 | fail: | |
1679 | WARN_ON(true); | |
1680 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1681 | } | |
1682 | ||
1683 | void efx_ef10_handle_drain_event(struct efx_nic *efx) | |
1684 | { | |
1685 | if (atomic_dec_and_test(&efx->active_queues)) | |
1686 | wake_up(&efx->flush_wq); | |
1687 | ||
1688 | WARN_ON(atomic_read(&efx->active_queues) < 0); | |
1689 | } | |
1690 | ||
1691 | static int efx_ef10_fini_dmaq(struct efx_nic *efx) | |
1692 | { | |
1693 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1694 | struct efx_channel *channel; | |
1695 | struct efx_tx_queue *tx_queue; | |
1696 | struct efx_rx_queue *rx_queue; | |
1697 | int pending; | |
1698 | ||
1699 | /* If the MC has just rebooted, the TX/RX queues will have already been | |
1700 | * torn down, but efx->active_queues needs to be set to zero. | |
1701 | */ | |
1702 | if (nic_data->must_realloc_vis) { | |
1703 | atomic_set(&efx->active_queues, 0); | |
1704 | return 0; | |
1705 | } | |
1706 | ||
1707 | /* Do not attempt to write to the NIC during EEH recovery */ | |
1708 | if (efx->state != STATE_RECOVERY) { | |
1709 | efx_for_each_channel(channel, efx) { | |
1710 | efx_for_each_channel_rx_queue(rx_queue, channel) | |
1711 | efx_ef10_rx_fini(rx_queue); | |
1712 | efx_for_each_channel_tx_queue(tx_queue, channel) | |
1713 | efx_ef10_tx_fini(tx_queue); | |
1714 | } | |
1715 | ||
1716 | wait_event_timeout(efx->flush_wq, | |
1717 | atomic_read(&efx->active_queues) == 0, | |
1718 | msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); | |
1719 | pending = atomic_read(&efx->active_queues); | |
1720 | if (pending) { | |
1721 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", | |
1722 | pending); | |
1723 | return -ETIMEDOUT; | |
1724 | } | |
1725 | } | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
1730 | static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, | |
1731 | const struct efx_filter_spec *right) | |
1732 | { | |
1733 | if ((left->match_flags ^ right->match_flags) | | |
1734 | ((left->flags ^ right->flags) & | |
1735 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) | |
1736 | return false; | |
1737 | ||
1738 | return memcmp(&left->outer_vid, &right->outer_vid, | |
1739 | sizeof(struct efx_filter_spec) - | |
1740 | offsetof(struct efx_filter_spec, outer_vid)) == 0; | |
1741 | } | |
1742 | ||
1743 | static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) | |
1744 | { | |
1745 | BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); | |
1746 | return jhash2((const u32 *)&spec->outer_vid, | |
1747 | (sizeof(struct efx_filter_spec) - | |
1748 | offsetof(struct efx_filter_spec, outer_vid)) / 4, | |
1749 | 0); | |
1750 | /* XXX should we randomise the initval? */ | |
1751 | } | |
1752 | ||
1753 | /* Decide whether a filter should be exclusive or else should allow | |
1754 | * delivery to additional recipients. Currently we decide that | |
1755 | * filters for specific local unicast MAC and IP addresses are | |
1756 | * exclusive. | |
1757 | */ | |
1758 | static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) | |
1759 | { | |
1760 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && | |
1761 | !is_multicast_ether_addr(spec->loc_mac)) | |
1762 | return true; | |
1763 | ||
1764 | if ((spec->match_flags & | |
1765 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == | |
1766 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { | |
1767 | if (spec->ether_type == htons(ETH_P_IP) && | |
1768 | !ipv4_is_multicast(spec->loc_host[0])) | |
1769 | return true; | |
1770 | if (spec->ether_type == htons(ETH_P_IPV6) && | |
1771 | ((const u8 *)spec->loc_host)[0] != 0xff) | |
1772 | return true; | |
1773 | } | |
1774 | ||
1775 | return false; | |
1776 | } | |
1777 | ||
1778 | static struct efx_filter_spec * | |
1779 | efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, | |
1780 | unsigned int filter_idx) | |
1781 | { | |
1782 | return (struct efx_filter_spec *)(table->entry[filter_idx].spec & | |
1783 | ~EFX_EF10_FILTER_FLAGS); | |
1784 | } | |
1785 | ||
1786 | static unsigned int | |
1787 | efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, | |
1788 | unsigned int filter_idx) | |
1789 | { | |
1790 | return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; | |
1791 | } | |
1792 | ||
1793 | static void | |
1794 | efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, | |
1795 | unsigned int filter_idx, | |
1796 | const struct efx_filter_spec *spec, | |
1797 | unsigned int flags) | |
1798 | { | |
1799 | table->entry[filter_idx].spec = (unsigned long)spec | flags; | |
1800 | } | |
1801 | ||
1802 | static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |
1803 | const struct efx_filter_spec *spec, | |
1804 | efx_dword_t *inbuf, u64 handle, | |
1805 | bool replacing) | |
1806 | { | |
1807 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1808 | ||
1809 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); | |
1810 | ||
1811 | if (replacing) { | |
1812 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
1813 | MC_CMD_FILTER_OP_IN_OP_REPLACE); | |
1814 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); | |
1815 | } else { | |
1816 | u32 match_fields = 0; | |
1817 | ||
1818 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
1819 | efx_ef10_filter_is_exclusive(spec) ? | |
1820 | MC_CMD_FILTER_OP_IN_OP_INSERT : | |
1821 | MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); | |
1822 | ||
1823 | /* Convert match flags and values. Unlike almost | |
1824 | * everything else in MCDI, these fields are in | |
1825 | * network byte order. | |
1826 | */ | |
1827 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) | |
1828 | match_fields |= | |
1829 | is_multicast_ether_addr(spec->loc_mac) ? | |
1830 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : | |
1831 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; | |
1832 | #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ | |
1833 | if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ | |
1834 | match_fields |= \ | |
1835 | 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
1836 | mcdi_field ## _LBN; \ | |
1837 | BUILD_BUG_ON( \ | |
1838 | MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ | |
1839 | sizeof(spec->gen_field)); \ | |
1840 | memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ | |
1841 | &spec->gen_field, sizeof(spec->gen_field)); \ | |
1842 | } | |
1843 | COPY_FIELD(REM_HOST, rem_host, SRC_IP); | |
1844 | COPY_FIELD(LOC_HOST, loc_host, DST_IP); | |
1845 | COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); | |
1846 | COPY_FIELD(REM_PORT, rem_port, SRC_PORT); | |
1847 | COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); | |
1848 | COPY_FIELD(LOC_PORT, loc_port, DST_PORT); | |
1849 | COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); | |
1850 | COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); | |
1851 | COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); | |
1852 | COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); | |
1853 | #undef COPY_FIELD | |
1854 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, | |
1855 | match_fields); | |
1856 | } | |
1857 | ||
1858 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1859 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, | |
1860 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? | |
1861 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : | |
1862 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); | |
1863 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, | |
1864 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); | |
1865 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id); | |
1866 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, | |
1867 | (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? | |
1868 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : | |
1869 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); | |
1870 | if (spec->flags & EFX_FILTER_FLAG_RX_RSS) | |
1871 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, | |
1872 | spec->rss_context != | |
1873 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? | |
1874 | spec->rss_context : nic_data->rx_rss_context); | |
1875 | } | |
1876 | ||
1877 | static int efx_ef10_filter_push(struct efx_nic *efx, | |
1878 | const struct efx_filter_spec *spec, | |
1879 | u64 *handle, bool replacing) | |
1880 | { | |
1881 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
1882 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); | |
1883 | int rc; | |
1884 | ||
1885 | efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); | |
1886 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
1887 | outbuf, sizeof(outbuf), NULL); | |
1888 | if (rc == 0) | |
1889 | *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
1890 | return rc; | |
1891 | } | |
1892 | ||
1893 | static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, | |
1894 | enum efx_filter_match_flags match_flags) | |
1895 | { | |
1896 | unsigned int match_pri; | |
1897 | ||
1898 | for (match_pri = 0; | |
1899 | match_pri < table->rx_match_count; | |
1900 | match_pri++) | |
1901 | if (table->rx_match_flags[match_pri] == match_flags) | |
1902 | return match_pri; | |
1903 | ||
1904 | return -EPROTONOSUPPORT; | |
1905 | } | |
1906 | ||
1907 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, | |
1908 | struct efx_filter_spec *spec, | |
1909 | bool replace_equal) | |
1910 | { | |
1911 | struct efx_ef10_filter_table *table = efx->filter_state; | |
1912 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
1913 | struct efx_filter_spec *saved_spec; | |
1914 | unsigned int match_pri, hash; | |
1915 | unsigned int priv_flags; | |
1916 | bool replacing = false; | |
1917 | int ins_index = -1; | |
1918 | DEFINE_WAIT(wait); | |
1919 | bool is_mc_recip; | |
1920 | s32 rc; | |
1921 | ||
1922 | /* For now, only support RX filters */ | |
1923 | if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != | |
1924 | EFX_FILTER_FLAG_RX) | |
1925 | return -EINVAL; | |
1926 | ||
1927 | rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); | |
1928 | if (rc < 0) | |
1929 | return rc; | |
1930 | match_pri = rc; | |
1931 | ||
1932 | hash = efx_ef10_filter_hash(spec); | |
1933 | is_mc_recip = efx_filter_is_mc_recipient(spec); | |
1934 | if (is_mc_recip) | |
1935 | bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
1936 | ||
1937 | /* Find any existing filters with the same match tuple or | |
1938 | * else a free slot to insert at. If any of them are busy, | |
1939 | * we have to wait and retry. | |
1940 | */ | |
1941 | for (;;) { | |
1942 | unsigned int depth = 1; | |
1943 | unsigned int i; | |
1944 | ||
1945 | spin_lock_bh(&efx->filter_lock); | |
1946 | ||
1947 | for (;;) { | |
1948 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
1949 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
1950 | ||
1951 | if (!saved_spec) { | |
1952 | if (ins_index < 0) | |
1953 | ins_index = i; | |
1954 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
1955 | if (table->entry[i].spec & | |
1956 | EFX_EF10_FILTER_FLAG_BUSY) | |
1957 | break; | |
1958 | if (spec->priority < saved_spec->priority && | |
1959 | !(saved_spec->priority == | |
1960 | EFX_FILTER_PRI_REQUIRED && | |
1961 | saved_spec->flags & | |
1962 | EFX_FILTER_FLAG_RX_STACK)) { | |
1963 | rc = -EPERM; | |
1964 | goto out_unlock; | |
1965 | } | |
1966 | if (!is_mc_recip) { | |
1967 | /* This is the only one */ | |
1968 | if (spec->priority == | |
1969 | saved_spec->priority && | |
1970 | !replace_equal) { | |
1971 | rc = -EEXIST; | |
1972 | goto out_unlock; | |
1973 | } | |
1974 | ins_index = i; | |
1975 | goto found; | |
1976 | } else if (spec->priority > | |
1977 | saved_spec->priority || | |
1978 | (spec->priority == | |
1979 | saved_spec->priority && | |
1980 | replace_equal)) { | |
1981 | if (ins_index < 0) | |
1982 | ins_index = i; | |
1983 | else | |
1984 | __set_bit(depth, mc_rem_map); | |
1985 | } | |
1986 | } | |
1987 | ||
1988 | /* Once we reach the maximum search depth, use | |
1989 | * the first suitable slot or return -EBUSY if | |
1990 | * there was none | |
1991 | */ | |
1992 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
1993 | if (ins_index < 0) { | |
1994 | rc = -EBUSY; | |
1995 | goto out_unlock; | |
1996 | } | |
1997 | goto found; | |
1998 | } | |
1999 | ||
2000 | ++depth; | |
2001 | } | |
2002 | ||
2003 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2004 | spin_unlock_bh(&efx->filter_lock); | |
2005 | schedule(); | |
2006 | } | |
2007 | ||
2008 | found: | |
2009 | /* Create a software table entry if necessary, and mark it | |
2010 | * busy. We might yet fail to insert, but any attempt to | |
2011 | * insert a conflicting filter while we're waiting for the | |
2012 | * firmware must find the busy entry. | |
2013 | */ | |
2014 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2015 | if (saved_spec) { | |
2016 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { | |
2017 | /* Just make sure it won't be removed */ | |
2018 | saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; | |
2019 | table->entry[ins_index].spec &= | |
2020 | ~EFX_EF10_FILTER_FLAG_STACK_OLD; | |
2021 | rc = ins_index; | |
2022 | goto out_unlock; | |
2023 | } | |
2024 | replacing = true; | |
2025 | priv_flags = efx_ef10_filter_entry_flags(table, ins_index); | |
2026 | } else { | |
2027 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2028 | if (!saved_spec) { | |
2029 | rc = -ENOMEM; | |
2030 | goto out_unlock; | |
2031 | } | |
2032 | *saved_spec = *spec; | |
2033 | priv_flags = 0; | |
2034 | } | |
2035 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2036 | priv_flags | EFX_EF10_FILTER_FLAG_BUSY); | |
2037 | ||
2038 | /* Mark lower-priority multicast recipients busy prior to removal */ | |
2039 | if (is_mc_recip) { | |
2040 | unsigned int depth, i; | |
2041 | ||
2042 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2043 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2044 | if (test_bit(depth, mc_rem_map)) | |
2045 | table->entry[i].spec |= | |
2046 | EFX_EF10_FILTER_FLAG_BUSY; | |
2047 | } | |
2048 | } | |
2049 | ||
2050 | spin_unlock_bh(&efx->filter_lock); | |
2051 | ||
2052 | rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, | |
2053 | replacing); | |
2054 | ||
2055 | /* Finalise the software table entry */ | |
2056 | spin_lock_bh(&efx->filter_lock); | |
2057 | if (rc == 0) { | |
2058 | if (replacing) { | |
2059 | /* Update the fields that may differ */ | |
2060 | saved_spec->priority = spec->priority; | |
2061 | saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK; | |
2062 | saved_spec->flags |= spec->flags; | |
2063 | saved_spec->rss_context = spec->rss_context; | |
2064 | saved_spec->dmaq_id = spec->dmaq_id; | |
2065 | } | |
2066 | } else if (!replacing) { | |
2067 | kfree(saved_spec); | |
2068 | saved_spec = NULL; | |
2069 | } | |
2070 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); | |
2071 | ||
2072 | /* Remove and finalise entries for lower-priority multicast | |
2073 | * recipients | |
2074 | */ | |
2075 | if (is_mc_recip) { | |
2076 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2077 | unsigned int depth, i; | |
2078 | ||
2079 | memset(inbuf, 0, sizeof(inbuf)); | |
2080 | ||
2081 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2082 | if (!test_bit(depth, mc_rem_map)) | |
2083 | continue; | |
2084 | ||
2085 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2086 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2087 | priv_flags = efx_ef10_filter_entry_flags(table, i); | |
2088 | ||
2089 | if (rc == 0) { | |
2090 | spin_unlock_bh(&efx->filter_lock); | |
2091 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2092 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2093 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2094 | table->entry[i].handle); | |
2095 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2096 | inbuf, sizeof(inbuf), | |
2097 | NULL, 0, NULL); | |
2098 | spin_lock_bh(&efx->filter_lock); | |
2099 | } | |
2100 | ||
2101 | if (rc == 0) { | |
2102 | kfree(saved_spec); | |
2103 | saved_spec = NULL; | |
2104 | priv_flags = 0; | |
2105 | } else { | |
2106 | priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2107 | } | |
2108 | efx_ef10_filter_set_entry(table, i, saved_spec, | |
2109 | priv_flags); | |
2110 | } | |
2111 | } | |
2112 | ||
2113 | /* If successful, return the inserted filter ID */ | |
2114 | if (rc == 0) | |
2115 | rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; | |
2116 | ||
2117 | wake_up_all(&table->waitq); | |
2118 | out_unlock: | |
2119 | spin_unlock_bh(&efx->filter_lock); | |
2120 | finish_wait(&table->waitq, &wait); | |
2121 | return rc; | |
2122 | } | |
2123 | ||
2124 | void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) | |
2125 | { | |
2126 | /* no need to do anything here on EF10 */ | |
2127 | } | |
2128 | ||
2129 | /* Remove a filter. | |
2130 | * If !stack_requested, remove by ID | |
2131 | * If stack_requested, remove by index | |
2132 | * Filter ID may come from userland and must be range-checked. | |
2133 | */ | |
2134 | static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |
2135 | enum efx_filter_priority priority, | |
2136 | u32 filter_id, bool stack_requested) | |
2137 | { | |
2138 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2139 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2140 | MCDI_DECLARE_BUF(inbuf, | |
2141 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2142 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2143 | struct efx_filter_spec *spec; | |
2144 | DEFINE_WAIT(wait); | |
2145 | int rc; | |
2146 | ||
2147 | /* Find the software table entry and mark it busy. Don't | |
2148 | * remove it yet; any attempt to update while we're waiting | |
2149 | * for the firmware must find the busy entry. | |
2150 | */ | |
2151 | for (;;) { | |
2152 | spin_lock_bh(&efx->filter_lock); | |
2153 | if (!(table->entry[filter_idx].spec & | |
2154 | EFX_EF10_FILTER_FLAG_BUSY)) | |
2155 | break; | |
2156 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2157 | spin_unlock_bh(&efx->filter_lock); | |
2158 | schedule(); | |
2159 | } | |
2160 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2161 | if (!spec || spec->priority > priority || | |
2162 | (!stack_requested && | |
2163 | efx_ef10_filter_rx_match_pri(table, spec->match_flags) != | |
2164 | filter_id / HUNT_FILTER_TBL_ROWS)) { | |
2165 | rc = -ENOENT; | |
2166 | goto out_unlock; | |
2167 | } | |
2168 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2169 | spin_unlock_bh(&efx->filter_lock); | |
2170 | ||
2171 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) { | |
2172 | /* Reset steering of a stack-owned filter */ | |
2173 | ||
2174 | struct efx_filter_spec new_spec = *spec; | |
2175 | ||
2176 | new_spec.priority = EFX_FILTER_PRI_REQUIRED; | |
2177 | new_spec.flags = (EFX_FILTER_FLAG_RX | | |
2178 | EFX_FILTER_FLAG_RX_RSS | | |
2179 | EFX_FILTER_FLAG_RX_STACK); | |
2180 | new_spec.dmaq_id = 0; | |
2181 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; | |
2182 | rc = efx_ef10_filter_push(efx, &new_spec, | |
2183 | &table->entry[filter_idx].handle, | |
2184 | true); | |
2185 | ||
2186 | spin_lock_bh(&efx->filter_lock); | |
2187 | if (rc == 0) | |
2188 | *spec = new_spec; | |
2189 | } else { | |
2190 | /* Really remove the filter */ | |
2191 | ||
2192 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2193 | efx_ef10_filter_is_exclusive(spec) ? | |
2194 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
2195 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2196 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2197 | table->entry[filter_idx].handle); | |
2198 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2199 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
2200 | ||
2201 | spin_lock_bh(&efx->filter_lock); | |
2202 | if (rc == 0) { | |
2203 | kfree(spec); | |
2204 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2205 | } | |
2206 | } | |
2207 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2208 | wake_up_all(&table->waitq); | |
2209 | out_unlock: | |
2210 | spin_unlock_bh(&efx->filter_lock); | |
2211 | finish_wait(&table->waitq, &wait); | |
2212 | return rc; | |
2213 | } | |
2214 | ||
2215 | static int efx_ef10_filter_remove_safe(struct efx_nic *efx, | |
2216 | enum efx_filter_priority priority, | |
2217 | u32 filter_id) | |
2218 | { | |
2219 | return efx_ef10_filter_remove_internal(efx, priority, filter_id, false); | |
2220 | } | |
2221 | ||
2222 | static int efx_ef10_filter_get_safe(struct efx_nic *efx, | |
2223 | enum efx_filter_priority priority, | |
2224 | u32 filter_id, struct efx_filter_spec *spec) | |
2225 | { | |
2226 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2227 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2228 | const struct efx_filter_spec *saved_spec; | |
2229 | int rc; | |
2230 | ||
2231 | spin_lock_bh(&efx->filter_lock); | |
2232 | saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2233 | if (saved_spec && saved_spec->priority == priority && | |
2234 | efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == | |
2235 | filter_id / HUNT_FILTER_TBL_ROWS) { | |
2236 | *spec = *saved_spec; | |
2237 | rc = 0; | |
2238 | } else { | |
2239 | rc = -ENOENT; | |
2240 | } | |
2241 | spin_unlock_bh(&efx->filter_lock); | |
2242 | return rc; | |
2243 | } | |
2244 | ||
2245 | static void efx_ef10_filter_clear_rx(struct efx_nic *efx, | |
2246 | enum efx_filter_priority priority) | |
2247 | { | |
2248 | /* TODO */ | |
2249 | } | |
2250 | ||
2251 | static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, | |
2252 | enum efx_filter_priority priority) | |
2253 | { | |
2254 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2255 | unsigned int filter_idx; | |
2256 | s32 count = 0; | |
2257 | ||
2258 | spin_lock_bh(&efx->filter_lock); | |
2259 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2260 | if (table->entry[filter_idx].spec && | |
2261 | efx_ef10_filter_entry_spec(table, filter_idx)->priority == | |
2262 | priority) | |
2263 | ++count; | |
2264 | } | |
2265 | spin_unlock_bh(&efx->filter_lock); | |
2266 | return count; | |
2267 | } | |
2268 | ||
2269 | static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) | |
2270 | { | |
2271 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2272 | ||
2273 | return table->rx_match_count * HUNT_FILTER_TBL_ROWS; | |
2274 | } | |
2275 | ||
2276 | static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, | |
2277 | enum efx_filter_priority priority, | |
2278 | u32 *buf, u32 size) | |
2279 | { | |
2280 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2281 | struct efx_filter_spec *spec; | |
2282 | unsigned int filter_idx; | |
2283 | s32 count = 0; | |
2284 | ||
2285 | spin_lock_bh(&efx->filter_lock); | |
2286 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2287 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2288 | if (spec && spec->priority == priority) { | |
2289 | if (count == size) { | |
2290 | count = -EMSGSIZE; | |
2291 | break; | |
2292 | } | |
2293 | buf[count++] = (efx_ef10_filter_rx_match_pri( | |
2294 | table, spec->match_flags) * | |
2295 | HUNT_FILTER_TBL_ROWS + | |
2296 | filter_idx); | |
2297 | } | |
2298 | } | |
2299 | spin_unlock_bh(&efx->filter_lock); | |
2300 | return count; | |
2301 | } | |
2302 | ||
2303 | #ifdef CONFIG_RFS_ACCEL | |
2304 | ||
2305 | static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; | |
2306 | ||
2307 | static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, | |
2308 | struct efx_filter_spec *spec) | |
2309 | { | |
2310 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2311 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2312 | struct efx_filter_spec *saved_spec; | |
2313 | unsigned int hash, i, depth = 1; | |
2314 | bool replacing = false; | |
2315 | int ins_index = -1; | |
2316 | u64 cookie; | |
2317 | s32 rc; | |
2318 | ||
2319 | /* Must be an RX filter without RSS and not for a multicast | |
2320 | * destination address (RFS only works for connected sockets). | |
2321 | * These restrictions allow us to pass only a tiny amount of | |
2322 | * data through to the completion function. | |
2323 | */ | |
2324 | EFX_WARN_ON_PARANOID(spec->flags != | |
2325 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); | |
2326 | EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); | |
2327 | EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); | |
2328 | ||
2329 | hash = efx_ef10_filter_hash(spec); | |
2330 | ||
2331 | spin_lock_bh(&efx->filter_lock); | |
2332 | ||
2333 | /* Find any existing filter with the same match tuple or else | |
2334 | * a free slot to insert at. If an existing filter is busy, | |
2335 | * we have to give up. | |
2336 | */ | |
2337 | for (;;) { | |
2338 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2339 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2340 | ||
2341 | if (!saved_spec) { | |
2342 | if (ins_index < 0) | |
2343 | ins_index = i; | |
2344 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
2345 | if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { | |
2346 | rc = -EBUSY; | |
2347 | goto fail_unlock; | |
2348 | } | |
2349 | EFX_WARN_ON_PARANOID(saved_spec->flags & | |
2350 | EFX_FILTER_FLAG_RX_STACK); | |
2351 | if (spec->priority < saved_spec->priority) { | |
2352 | rc = -EPERM; | |
2353 | goto fail_unlock; | |
2354 | } | |
2355 | ins_index = i; | |
2356 | break; | |
2357 | } | |
2358 | ||
2359 | /* Once we reach the maximum search depth, use the | |
2360 | * first suitable slot or return -EBUSY if there was | |
2361 | * none | |
2362 | */ | |
2363 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
2364 | if (ins_index < 0) { | |
2365 | rc = -EBUSY; | |
2366 | goto fail_unlock; | |
2367 | } | |
2368 | break; | |
2369 | } | |
2370 | ||
2371 | ++depth; | |
2372 | } | |
2373 | ||
2374 | /* Create a software table entry if necessary, and mark it | |
2375 | * busy. We might yet fail to insert, but any attempt to | |
2376 | * insert a conflicting filter while we're waiting for the | |
2377 | * firmware must find the busy entry. | |
2378 | */ | |
2379 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2380 | if (saved_spec) { | |
2381 | replacing = true; | |
2382 | } else { | |
2383 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2384 | if (!saved_spec) { | |
2385 | rc = -ENOMEM; | |
2386 | goto fail_unlock; | |
2387 | } | |
2388 | *saved_spec = *spec; | |
2389 | } | |
2390 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2391 | EFX_EF10_FILTER_FLAG_BUSY); | |
2392 | ||
2393 | spin_unlock_bh(&efx->filter_lock); | |
2394 | ||
2395 | /* Pack up the variables needed on completion */ | |
2396 | cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; | |
2397 | ||
2398 | efx_ef10_filter_push_prep(efx, spec, inbuf, | |
2399 | table->entry[ins_index].handle, replacing); | |
2400 | efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2401 | MC_CMD_FILTER_OP_OUT_LEN, | |
2402 | efx_ef10_filter_rfs_insert_complete, cookie); | |
2403 | ||
2404 | return ins_index; | |
2405 | ||
2406 | fail_unlock: | |
2407 | spin_unlock_bh(&efx->filter_lock); | |
2408 | return rc; | |
2409 | } | |
2410 | ||
2411 | static void | |
2412 | efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, | |
2413 | int rc, efx_dword_t *outbuf, | |
2414 | size_t outlen_actual) | |
2415 | { | |
2416 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2417 | unsigned int ins_index, dmaq_id; | |
2418 | struct efx_filter_spec *spec; | |
2419 | bool replacing; | |
2420 | ||
2421 | /* Unpack the cookie */ | |
2422 | replacing = cookie >> 31; | |
2423 | ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); | |
2424 | dmaq_id = cookie & 0xffff; | |
2425 | ||
2426 | spin_lock_bh(&efx->filter_lock); | |
2427 | spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2428 | if (rc == 0) { | |
2429 | table->entry[ins_index].handle = | |
2430 | MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
2431 | if (replacing) | |
2432 | spec->dmaq_id = dmaq_id; | |
2433 | } else if (!replacing) { | |
2434 | kfree(spec); | |
2435 | spec = NULL; | |
2436 | } | |
2437 | efx_ef10_filter_set_entry(table, ins_index, spec, 0); | |
2438 | spin_unlock_bh(&efx->filter_lock); | |
2439 | ||
2440 | wake_up_all(&table->waitq); | |
2441 | } | |
2442 | ||
2443 | static void | |
2444 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2445 | unsigned long filter_idx, | |
2446 | int rc, efx_dword_t *outbuf, | |
2447 | size_t outlen_actual); | |
2448 | ||
2449 | static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |
2450 | unsigned int filter_idx) | |
2451 | { | |
2452 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2453 | struct efx_filter_spec *spec = | |
2454 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2455 | MCDI_DECLARE_BUF(inbuf, | |
2456 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2457 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2458 | ||
2459 | if (!spec || | |
2460 | (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || | |
2461 | spec->priority != EFX_FILTER_PRI_HINT || | |
2462 | !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, | |
2463 | flow_id, filter_idx)) | |
2464 | return false; | |
2465 | ||
2466 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2467 | MC_CMD_FILTER_OP_IN_OP_REMOVE); | |
2468 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2469 | table->entry[filter_idx].handle); | |
2470 | if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, | |
2471 | efx_ef10_filter_rfs_expire_complete, filter_idx)) | |
2472 | return false; | |
2473 | ||
2474 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2475 | return true; | |
2476 | } | |
2477 | ||
2478 | static void | |
2479 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2480 | unsigned long filter_idx, | |
2481 | int rc, efx_dword_t *outbuf, | |
2482 | size_t outlen_actual) | |
2483 | { | |
2484 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2485 | struct efx_filter_spec *spec = | |
2486 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2487 | ||
2488 | spin_lock_bh(&efx->filter_lock); | |
2489 | if (rc == 0) { | |
2490 | kfree(spec); | |
2491 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2492 | } | |
2493 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2494 | wake_up_all(&table->waitq); | |
2495 | spin_unlock_bh(&efx->filter_lock); | |
2496 | } | |
2497 | ||
2498 | #endif /* CONFIG_RFS_ACCEL */ | |
2499 | ||
2500 | static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) | |
2501 | { | |
2502 | int match_flags = 0; | |
2503 | ||
2504 | #define MAP_FLAG(gen_flag, mcdi_field) { \ | |
2505 | u32 old_mcdi_flags = mcdi_flags; \ | |
2506 | mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
2507 | mcdi_field ## _LBN); \ | |
2508 | if (mcdi_flags != old_mcdi_flags) \ | |
2509 | match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ | |
2510 | } | |
2511 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); | |
2512 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); | |
2513 | MAP_FLAG(REM_HOST, SRC_IP); | |
2514 | MAP_FLAG(LOC_HOST, DST_IP); | |
2515 | MAP_FLAG(REM_MAC, SRC_MAC); | |
2516 | MAP_FLAG(REM_PORT, SRC_PORT); | |
2517 | MAP_FLAG(LOC_MAC, DST_MAC); | |
2518 | MAP_FLAG(LOC_PORT, DST_PORT); | |
2519 | MAP_FLAG(ETHER_TYPE, ETHER_TYPE); | |
2520 | MAP_FLAG(INNER_VID, INNER_VLAN); | |
2521 | MAP_FLAG(OUTER_VID, OUTER_VLAN); | |
2522 | MAP_FLAG(IP_PROTO, IP_PROTO); | |
2523 | #undef MAP_FLAG | |
2524 | ||
2525 | /* Did we map them all? */ | |
2526 | if (mcdi_flags) | |
2527 | return -EINVAL; | |
2528 | ||
2529 | return match_flags; | |
2530 | } | |
2531 | ||
2532 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) | |
2533 | { | |
2534 | MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); | |
2535 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); | |
2536 | unsigned int pd_match_pri, pd_match_count; | |
2537 | struct efx_ef10_filter_table *table; | |
2538 | size_t outlen; | |
2539 | int rc; | |
2540 | ||
2541 | table = kzalloc(sizeof(*table), GFP_KERNEL); | |
2542 | if (!table) | |
2543 | return -ENOMEM; | |
2544 | ||
2545 | /* Find out which RX filter types are supported, and their priorities */ | |
2546 | MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, | |
2547 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); | |
2548 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, | |
2549 | inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), | |
2550 | &outlen); | |
2551 | if (rc) | |
2552 | goto fail; | |
2553 | pd_match_count = MCDI_VAR_ARRAY_LEN( | |
2554 | outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); | |
2555 | table->rx_match_count = 0; | |
2556 | ||
2557 | for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { | |
2558 | u32 mcdi_flags = | |
2559 | MCDI_ARRAY_DWORD( | |
2560 | outbuf, | |
2561 | GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, | |
2562 | pd_match_pri); | |
2563 | rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); | |
2564 | if (rc < 0) { | |
2565 | netif_dbg(efx, probe, efx->net_dev, | |
2566 | "%s: fw flags %#x pri %u not supported in driver\n", | |
2567 | __func__, mcdi_flags, pd_match_pri); | |
2568 | } else { | |
2569 | netif_dbg(efx, probe, efx->net_dev, | |
2570 | "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", | |
2571 | __func__, mcdi_flags, pd_match_pri, | |
2572 | rc, table->rx_match_count); | |
2573 | table->rx_match_flags[table->rx_match_count++] = rc; | |
2574 | } | |
2575 | } | |
2576 | ||
2577 | table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); | |
2578 | if (!table->entry) { | |
2579 | rc = -ENOMEM; | |
2580 | goto fail; | |
2581 | } | |
2582 | ||
2583 | efx->filter_state = table; | |
2584 | init_waitqueue_head(&table->waitq); | |
2585 | return 0; | |
2586 | ||
2587 | fail: | |
2588 | kfree(table); | |
2589 | return rc; | |
2590 | } | |
2591 | ||
2592 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) | |
2593 | { | |
2594 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2595 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2596 | struct efx_filter_spec *spec; | |
2597 | unsigned int filter_idx; | |
2598 | bool failed = false; | |
2599 | int rc; | |
2600 | ||
2601 | if (!nic_data->must_restore_filters) | |
2602 | return; | |
2603 | ||
2604 | spin_lock_bh(&efx->filter_lock); | |
2605 | ||
2606 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2607 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2608 | if (!spec) | |
2609 | continue; | |
2610 | ||
2611 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2612 | spin_unlock_bh(&efx->filter_lock); | |
2613 | ||
2614 | rc = efx_ef10_filter_push(efx, spec, | |
2615 | &table->entry[filter_idx].handle, | |
2616 | false); | |
2617 | if (rc) | |
2618 | failed = true; | |
2619 | ||
2620 | spin_lock_bh(&efx->filter_lock); | |
2621 | if (rc) { | |
2622 | kfree(spec); | |
2623 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2624 | } else { | |
2625 | table->entry[filter_idx].spec &= | |
2626 | ~EFX_EF10_FILTER_FLAG_BUSY; | |
2627 | } | |
2628 | } | |
2629 | ||
2630 | spin_unlock_bh(&efx->filter_lock); | |
2631 | ||
2632 | if (failed) | |
2633 | netif_err(efx, hw, efx->net_dev, | |
2634 | "unable to restore all filters\n"); | |
2635 | else | |
2636 | nic_data->must_restore_filters = false; | |
2637 | } | |
2638 | ||
2639 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) | |
2640 | { | |
2641 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2642 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2643 | struct efx_filter_spec *spec; | |
2644 | unsigned int filter_idx; | |
2645 | int rc; | |
2646 | ||
2647 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2648 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2649 | if (!spec) | |
2650 | continue; | |
2651 | ||
2652 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2653 | efx_ef10_filter_is_exclusive(spec) ? | |
2654 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
2655 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2656 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2657 | table->entry[filter_idx].handle); | |
2658 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2659 | NULL, 0, NULL); | |
2660 | ||
2661 | WARN_ON(rc != 0); | |
2662 | kfree(spec); | |
2663 | } | |
2664 | ||
2665 | vfree(table->entry); | |
2666 | kfree(table); | |
2667 | } | |
2668 | ||
2669 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | |
2670 | { | |
2671 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2672 | struct net_device *net_dev = efx->net_dev; | |
2673 | struct efx_filter_spec spec; | |
2674 | bool remove_failed = false; | |
2675 | struct netdev_hw_addr *uc; | |
2676 | struct netdev_hw_addr *mc; | |
2677 | unsigned int filter_idx; | |
2678 | int i, n, rc; | |
2679 | ||
2680 | if (!efx_dev_registered(efx)) | |
2681 | return; | |
2682 | ||
2683 | /* Mark old filters that may need to be removed */ | |
2684 | spin_lock_bh(&efx->filter_lock); | |
2685 | n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count; | |
2686 | for (i = 0; i < n; i++) { | |
2687 | filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
2688 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
2689 | } | |
2690 | n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count; | |
2691 | for (i = 0; i < n; i++) { | |
2692 | filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
2693 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
2694 | } | |
2695 | spin_unlock_bh(&efx->filter_lock); | |
2696 | ||
2697 | /* Copy/convert the address lists; add the primary station | |
2698 | * address and broadcast address | |
2699 | */ | |
2700 | netif_addr_lock_bh(net_dev); | |
2701 | if (net_dev->flags & IFF_PROMISC || | |
2702 | netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) { | |
2703 | table->stack_uc_count = -1; | |
2704 | } else { | |
2705 | table->stack_uc_count = 1 + netdev_uc_count(net_dev); | |
2706 | memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr, | |
2707 | ETH_ALEN); | |
2708 | i = 1; | |
2709 | netdev_for_each_uc_addr(uc, net_dev) { | |
2710 | memcpy(table->stack_uc_list[i].addr, | |
2711 | uc->addr, ETH_ALEN); | |
2712 | i++; | |
2713 | } | |
2714 | } | |
2715 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || | |
2716 | netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) { | |
2717 | table->stack_mc_count = -1; | |
2718 | } else { | |
2719 | table->stack_mc_count = 1 + netdev_mc_count(net_dev); | |
2720 | eth_broadcast_addr(table->stack_mc_list[0].addr); | |
2721 | i = 1; | |
2722 | netdev_for_each_mc_addr(mc, net_dev) { | |
2723 | memcpy(table->stack_mc_list[i].addr, | |
2724 | mc->addr, ETH_ALEN); | |
2725 | i++; | |
2726 | } | |
2727 | } | |
2728 | netif_addr_unlock_bh(net_dev); | |
2729 | ||
2730 | /* Insert/renew unicast filters */ | |
2731 | if (table->stack_uc_count >= 0) { | |
2732 | for (i = 0; i < table->stack_uc_count; i++) { | |
2733 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
2734 | EFX_FILTER_FLAG_RX_RSS | | |
2735 | EFX_FILTER_FLAG_RX_STACK, | |
2736 | 0); | |
2737 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
2738 | table->stack_uc_list[i].addr); | |
2739 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
2740 | if (rc < 0) { | |
2741 | /* Fall back to unicast-promisc */ | |
2742 | while (i--) | |
2743 | efx_ef10_filter_remove_safe( | |
2744 | efx, EFX_FILTER_PRI_REQUIRED, | |
2745 | table->stack_uc_list[i].id); | |
2746 | table->stack_uc_count = -1; | |
2747 | break; | |
2748 | } | |
2749 | table->stack_uc_list[i].id = rc; | |
2750 | } | |
2751 | } | |
2752 | if (table->stack_uc_count < 0) { | |
2753 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
2754 | EFX_FILTER_FLAG_RX_RSS | | |
2755 | EFX_FILTER_FLAG_RX_STACK, | |
2756 | 0); | |
2757 | efx_filter_set_uc_def(&spec); | |
2758 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
2759 | if (rc < 0) { | |
2760 | WARN_ON(1); | |
2761 | table->stack_uc_count = 0; | |
2762 | } else { | |
2763 | table->stack_uc_list[0].id = rc; | |
2764 | } | |
2765 | } | |
2766 | ||
2767 | /* Insert/renew multicast filters */ | |
2768 | if (table->stack_mc_count >= 0) { | |
2769 | for (i = 0; i < table->stack_mc_count; i++) { | |
2770 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
2771 | EFX_FILTER_FLAG_RX_RSS | | |
2772 | EFX_FILTER_FLAG_RX_STACK, | |
2773 | 0); | |
2774 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
2775 | table->stack_mc_list[i].addr); | |
2776 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
2777 | if (rc < 0) { | |
2778 | /* Fall back to multicast-promisc */ | |
2779 | while (i--) | |
2780 | efx_ef10_filter_remove_safe( | |
2781 | efx, EFX_FILTER_PRI_REQUIRED, | |
2782 | table->stack_mc_list[i].id); | |
2783 | table->stack_mc_count = -1; | |
2784 | break; | |
2785 | } | |
2786 | table->stack_mc_list[i].id = rc; | |
2787 | } | |
2788 | } | |
2789 | if (table->stack_mc_count < 0) { | |
2790 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
2791 | EFX_FILTER_FLAG_RX_RSS | | |
2792 | EFX_FILTER_FLAG_RX_STACK, | |
2793 | 0); | |
2794 | efx_filter_set_mc_def(&spec); | |
2795 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
2796 | if (rc < 0) { | |
2797 | WARN_ON(1); | |
2798 | table->stack_mc_count = 0; | |
2799 | } else { | |
2800 | table->stack_mc_list[0].id = rc; | |
2801 | } | |
2802 | } | |
2803 | ||
2804 | /* Remove filters that weren't renewed. Since nothing else | |
2805 | * changes the STACK_OLD flag or removes these filters, we | |
2806 | * don't need to hold the filter_lock while scanning for | |
2807 | * these filters. | |
2808 | */ | |
2809 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | |
2810 | if (ACCESS_ONCE(table->entry[i].spec) & | |
2811 | EFX_EF10_FILTER_FLAG_STACK_OLD) { | |
2812 | if (efx_ef10_filter_remove_internal(efx, | |
2813 | EFX_FILTER_PRI_REQUIRED, | |
2814 | i, true) < 0) | |
2815 | remove_failed = true; | |
2816 | } | |
2817 | } | |
2818 | WARN_ON(remove_failed); | |
2819 | } | |
2820 | ||
2821 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) | |
2822 | { | |
2823 | efx_ef10_filter_sync_rx_mode(efx); | |
2824 | ||
2825 | return efx_mcdi_set_mac(efx); | |
2826 | } | |
2827 | ||
2828 | #ifdef CONFIG_SFC_MTD | |
2829 | ||
2830 | struct efx_ef10_nvram_type_info { | |
2831 | u16 type, type_mask; | |
2832 | u8 port; | |
2833 | const char *name; | |
2834 | }; | |
2835 | ||
2836 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { | |
2837 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, | |
2838 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, | |
2839 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, | |
2840 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, | |
2841 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, | |
2842 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, | |
2843 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, | |
2844 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, | |
2845 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, | |
2846 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, | |
2847 | }; | |
2848 | ||
2849 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |
2850 | struct efx_mcdi_mtd_partition *part, | |
2851 | unsigned int type) | |
2852 | { | |
2853 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); | |
2854 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); | |
2855 | const struct efx_ef10_nvram_type_info *info; | |
2856 | size_t size, erase_size, outlen; | |
2857 | bool protected; | |
2858 | int rc; | |
2859 | ||
2860 | for (info = efx_ef10_nvram_types; ; info++) { | |
2861 | if (info == | |
2862 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) | |
2863 | return -ENODEV; | |
2864 | if ((type & ~info->type_mask) == info->type) | |
2865 | break; | |
2866 | } | |
2867 | if (info->port != efx_port_num(efx)) | |
2868 | return -ENODEV; | |
2869 | ||
2870 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); | |
2871 | if (rc) | |
2872 | return rc; | |
2873 | if (protected) | |
2874 | return -ENODEV; /* hide it */ | |
2875 | ||
2876 | part->nvram_type = type; | |
2877 | ||
2878 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); | |
2879 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), | |
2880 | outbuf, sizeof(outbuf), &outlen); | |
2881 | if (rc) | |
2882 | return rc; | |
2883 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) | |
2884 | return -EIO; | |
2885 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & | |
2886 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) | |
2887 | part->fw_subtype = MCDI_DWORD(outbuf, | |
2888 | NVRAM_METADATA_OUT_SUBTYPE); | |
2889 | ||
2890 | part->common.dev_type_name = "EF10 NVRAM manager"; | |
2891 | part->common.type_name = info->name; | |
2892 | ||
2893 | part->common.mtd.type = MTD_NORFLASH; | |
2894 | part->common.mtd.flags = MTD_CAP_NORFLASH; | |
2895 | part->common.mtd.size = size; | |
2896 | part->common.mtd.erasesize = erase_size; | |
2897 | ||
2898 | return 0; | |
2899 | } | |
2900 | ||
2901 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | |
2902 | { | |
2903 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | |
2904 | struct efx_mcdi_mtd_partition *parts; | |
2905 | size_t outlen, n_parts_total, i, n_parts; | |
2906 | unsigned int type; | |
2907 | int rc; | |
2908 | ||
2909 | ASSERT_RTNL(); | |
2910 | ||
2911 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); | |
2912 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, | |
2913 | outbuf, sizeof(outbuf), &outlen); | |
2914 | if (rc) | |
2915 | return rc; | |
2916 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) | |
2917 | return -EIO; | |
2918 | ||
2919 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); | |
2920 | if (n_parts_total > | |
2921 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) | |
2922 | return -EIO; | |
2923 | ||
2924 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); | |
2925 | if (!parts) | |
2926 | return -ENOMEM; | |
2927 | ||
2928 | n_parts = 0; | |
2929 | for (i = 0; i < n_parts_total; i++) { | |
2930 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, | |
2931 | i); | |
2932 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); | |
2933 | if (rc == 0) | |
2934 | n_parts++; | |
2935 | else if (rc != -ENODEV) | |
2936 | goto fail; | |
2937 | } | |
2938 | ||
2939 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); | |
2940 | fail: | |
2941 | if (rc) | |
2942 | kfree(parts); | |
2943 | return rc; | |
2944 | } | |
2945 | ||
2946 | #endif /* CONFIG_SFC_MTD */ | |
2947 | ||
2948 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) | |
2949 | { | |
2950 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); | |
2951 | } | |
2952 | ||
2953 | const struct efx_nic_type efx_hunt_a0_nic_type = { | |
2954 | .mem_map_size = efx_ef10_mem_map_size, | |
2955 | .probe = efx_ef10_probe, | |
2956 | .remove = efx_ef10_remove, | |
2957 | .dimension_resources = efx_ef10_dimension_resources, | |
2958 | .init = efx_ef10_init_nic, | |
2959 | .fini = efx_port_dummy_op_void, | |
2960 | .map_reset_reason = efx_mcdi_map_reset_reason, | |
2961 | .map_reset_flags = efx_ef10_map_reset_flags, | |
2962 | .reset = efx_mcdi_reset, | |
2963 | .probe_port = efx_mcdi_port_probe, | |
2964 | .remove_port = efx_mcdi_port_remove, | |
2965 | .fini_dmaq = efx_ef10_fini_dmaq, | |
2966 | .describe_stats = efx_ef10_describe_stats, | |
2967 | .update_stats = efx_ef10_update_stats, | |
2968 | .start_stats = efx_mcdi_mac_start_stats, | |
2969 | .stop_stats = efx_mcdi_mac_stop_stats, | |
2970 | .set_id_led = efx_mcdi_set_id_led, | |
2971 | .push_irq_moderation = efx_ef10_push_irq_moderation, | |
2972 | .reconfigure_mac = efx_ef10_mac_reconfigure, | |
2973 | .check_mac_fault = efx_mcdi_mac_check_fault, | |
2974 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
2975 | .get_wol = efx_ef10_get_wol, | |
2976 | .set_wol = efx_ef10_set_wol, | |
2977 | .resume_wol = efx_port_dummy_op_void, | |
2978 | /* TODO: test_chip */ | |
2979 | .test_nvram = efx_mcdi_nvram_test_all, | |
2980 | .mcdi_request = efx_ef10_mcdi_request, | |
2981 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
2982 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
2983 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
2984 | .irq_enable_master = efx_port_dummy_op_void, | |
2985 | .irq_test_generate = efx_ef10_irq_test_generate, | |
2986 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
2987 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
2988 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | |
2989 | .tx_probe = efx_ef10_tx_probe, | |
2990 | .tx_init = efx_ef10_tx_init, | |
2991 | .tx_remove = efx_ef10_tx_remove, | |
2992 | .tx_write = efx_ef10_tx_write, | |
2993 | .rx_push_indir_table = efx_ef10_rx_push_indir_table, | |
2994 | .rx_probe = efx_ef10_rx_probe, | |
2995 | .rx_init = efx_ef10_rx_init, | |
2996 | .rx_remove = efx_ef10_rx_remove, | |
2997 | .rx_write = efx_ef10_rx_write, | |
2998 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
2999 | .ev_probe = efx_ef10_ev_probe, | |
3000 | .ev_init = efx_ef10_ev_init, | |
3001 | .ev_fini = efx_ef10_ev_fini, | |
3002 | .ev_remove = efx_ef10_ev_remove, | |
3003 | .ev_process = efx_ef10_ev_process, | |
3004 | .ev_read_ack = efx_ef10_ev_read_ack, | |
3005 | .ev_test_generate = efx_ef10_ev_test_generate, | |
3006 | .filter_table_probe = efx_ef10_filter_table_probe, | |
3007 | .filter_table_restore = efx_ef10_filter_table_restore, | |
3008 | .filter_table_remove = efx_ef10_filter_table_remove, | |
3009 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, | |
3010 | .filter_insert = efx_ef10_filter_insert, | |
3011 | .filter_remove_safe = efx_ef10_filter_remove_safe, | |
3012 | .filter_get_safe = efx_ef10_filter_get_safe, | |
3013 | .filter_clear_rx = efx_ef10_filter_clear_rx, | |
3014 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, | |
3015 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, | |
3016 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, | |
3017 | #ifdef CONFIG_RFS_ACCEL | |
3018 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, | |
3019 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, | |
3020 | #endif | |
3021 | #ifdef CONFIG_SFC_MTD | |
3022 | .mtd_probe = efx_ef10_mtd_probe, | |
3023 | .mtd_rename = efx_mcdi_mtd_rename, | |
3024 | .mtd_read = efx_mcdi_mtd_read, | |
3025 | .mtd_erase = efx_mcdi_mtd_erase, | |
3026 | .mtd_write = efx_mcdi_mtd_write, | |
3027 | .mtd_sync = efx_mcdi_mtd_sync, | |
3028 | #endif | |
3029 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | |
3030 | ||
3031 | .revision = EFX_REV_HUNT_A0, | |
3032 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
3033 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
3034 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
3035 | .can_rx_scatter = true, | |
3036 | .always_rx_scatter = true, | |
3037 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | |
3038 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, | |
3039 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3040 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | |
3041 | .mcdi_max_ver = 2, | |
3042 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, | |
3043 | }; |