1 | /**************************************************************************** |
2 | * Driver for Solarflare network controllers and boards | |
3 | * Copyright 2012-2013 Solarflare Communications Inc. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | #include "net_driver.h" | |
11 | #include "ef10_regs.h" | |
12 | #include "io.h" | |
13 | #include "mcdi.h" | |
14 | #include "mcdi_pcol.h" | |
15 | #include "nic.h" | |
16 | #include "workarounds.h" | |
17 | #include <linux/in.h> | |
18 | #include <linux/jhash.h> | |
19 | #include <linux/wait.h> | |
20 | #include <linux/workqueue.h> | |
21 | ||
22 | /* Hardware control for EF10 architecture including 'Huntington'. */ | |
23 | ||
24 | #define EFX_EF10_DRVGEN_EV 7 | |
25 | enum { | |
26 | EFX_EF10_TEST = 1, | |
27 | EFX_EF10_REFILL, | |
28 | }; | |
29 | ||
30 | /* The reserved RSS context value */ | |
31 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff | |
32 | ||
33 | /* The filter table(s) are managed by firmware and we have write-only | |
34 | * access. When removing filters we must identify them to the | |
35 | * firmware by a 64-bit handle, but this is too wide for Linux kernel | |
36 | * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to | |
37 | * be able to tell in advance whether a requested insertion will | |
38 | * replace an existing filter. Therefore we maintain a software hash | |
39 | * table, which should be at least as large as the hardware hash | |
40 | * table. | |
41 | * | |
42 | * Huntington has a single 8K filter table shared between all filter | |
43 | * types and both ports. | |
44 | */ | |
45 | #define HUNT_FILTER_TBL_ROWS 8192 | |
46 | ||
47 | struct efx_ef10_filter_table { | |
48 | /* The RX match field masks supported by this fw & hw, in order of priority */ | |
49 | enum efx_filter_match_flags rx_match_flags[ | |
50 | MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; | |
51 | unsigned int rx_match_count; | |
52 | ||
53 | struct { | |
54 | unsigned long spec; /* pointer to spec plus flag bits */ | |
55 | /* BUSY flag indicates that an update is in progress. STACK_OLD is | |
56 | * used to mark and sweep stack-owned MAC filters. | |
57 | */ | |
58 | #define EFX_EF10_FILTER_FLAG_BUSY 1UL | |
59 | #define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL | |
60 | #define EFX_EF10_FILTER_FLAGS 3UL | |
61 | u64 handle; /* firmware handle */ | |
62 | } *entry; | |
63 | wait_queue_head_t waitq; | |
64 | /* Shadow of net_device address lists, guarded by mac_lock */ | |
65 | #define EFX_EF10_FILTER_STACK_UC_MAX 32 | |
66 | #define EFX_EF10_FILTER_STACK_MC_MAX 256 | |
67 | struct { | |
68 | u8 addr[ETH_ALEN]; | |
69 | u16 id; | |
70 | } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX], | |
71 | stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX]; | |
72 | int stack_uc_count; /* negative for PROMISC */ | |
73 | int stack_mc_count; /* negative for PROMISC/ALLMULTI */ | |
74 | }; | |
75 | ||
76 | /* An arbitrary search limit for the software hash table */ | |
77 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 | |
78 | ||
79 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx); | |
80 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); | |
81 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); | |
82 | ||
83 | static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) | |
84 | { | |
85 | efx_dword_t reg; | |
86 | ||
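/* The MC writes 0xb007 ("boot") to EFX_WORD_1 of this register once it
 * has booted; EFX_WORD_0 then holds the current warm boot count.
 */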
87 | efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); | |
88 | return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? | |
89 | EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; | |
90 | } | |
91 | ||
92 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) | |
93 | { | |
94 | return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); | |
95 | } | |
96 | ||
97 | static int efx_ef10_init_datapath_caps(struct efx_nic *efx) | |
98 | { |
99 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); | |
100 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
101 | size_t outlen; | |
102 | int rc; | |
103 | ||
104 | BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); | |
105 | ||
106 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, | |
107 | outbuf, sizeof(outbuf), &outlen); | |
108 | if (rc) | |
109 | return rc; | |
110 | if (outlen < sizeof(outbuf)) { |
111 | netif_err(efx, drv, efx->net_dev, | |
112 | "unable to read datapath firmware capabilities\n"); | |
113 | return -EIO; | |
114 | } | |
115 | ||
116 | nic_data->datapath_caps = | |
117 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); | |
118 | ||
119 | if (!(nic_data->datapath_caps & |
120 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { | |
121 | netif_err(efx, drv, efx->net_dev, | |
122 | "current firmware does not support TSO\n"); | |
123 | return -ENODEV; | |
124 | } | |
125 | ||
126 | if (!(nic_data->datapath_caps & | |
127 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { | |
128 | netif_err(efx, probe, efx->net_dev, | |
129 | "current firmware does not support an RX prefix\n"); | |
130 | return -ENODEV; | |
131 | } |
132 | ||
133 | return 0; | |
134 | } | |
135 | ||
136 | static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) | |
137 | { | |
138 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); | |
139 | int rc; | |
140 | ||
141 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, | |
142 | outbuf, sizeof(outbuf), NULL); | |
143 | if (rc) | |
144 | return rc; | |
145 | rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); | |
146 | return rc > 0 ? rc : -ERANGE; | |
147 | } | |
148 | ||
149 | static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) | |
150 | { | |
151 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); | |
152 | size_t outlen; | |
153 | int rc; | |
154 | ||
155 | BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); | |
156 | ||
157 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, | |
158 | outbuf, sizeof(outbuf), &outlen); | |
159 | if (rc) | |
160 | return rc; | |
161 | if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) | |
162 | return -EIO; | |
163 | ||
164 | memcpy(mac_address, | |
165 | MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); | |
166 | return 0; | |
167 | } | |
168 | ||
169 | static int efx_ef10_probe(struct efx_nic *efx) | |
170 | { | |
171 | struct efx_ef10_nic_data *nic_data; | |
172 | int i, rc; | |
173 | ||
174 | /* We can have one VI for each 8K region. However we need | |
175 | * multiple TX queues per channel. | |
176 | */ | |
177 | efx->max_channels = | |
178 | min_t(unsigned int, | |
179 | EFX_MAX_CHANNELS, | |
180 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / | |
181 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); | |
182 | BUG_ON(efx->max_channels == 0); | |
183 | ||
184 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | |
185 | if (!nic_data) | |
186 | return -ENOMEM; | |
187 | efx->nic_data = nic_data; | |
188 | ||
189 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, | |
190 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); | |
191 | if (rc) | |
192 | goto fail1; | |
193 | ||
194 | /* Get the MC's warm boot count. In case it's rebooting right | |
195 | * now, be prepared to retry. | |
196 | */ | |
197 | i = 0; | |
198 | for (;;) { | |
199 | rc = efx_ef10_get_warm_boot_count(efx); | |
200 | if (rc >= 0) | |
201 | break; | |
202 | if (++i == 5) | |
203 | goto fail2; | |
204 | ssleep(1); | |
205 | } | |
206 | nic_data->warm_boot_count = rc; | |
207 | ||
208 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
209 | ||
210 | /* In case we're recovering from a crash (kexec), we want to | |
211 | * cancel any outstanding request by the previous user of this | |
212 | * function. We send a special message using the least | |
213 | * significant bits of the 'high' (doorbell) register. | |
214 | */ | |
215 | _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); | |
216 | ||
217 | rc = efx_mcdi_init(efx); | |
218 | if (rc) | |
219 | goto fail2; | |
220 | ||
221 | /* Reset (most) configuration for this function */ | |
222 | rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); | |
223 | if (rc) | |
224 | goto fail3; | |
225 | ||
226 | /* Enable event logging */ | |
227 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | |
228 | if (rc) | |
229 | goto fail3; | |
230 | ||
231 | rc = efx_ef10_init_datapath_caps(efx); | |
232 | if (rc < 0) |
233 | goto fail3; | |
234 | ||
235 | efx->rx_packet_len_offset = | |
236 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; | |
237 | ||
238 | rc = efx_mcdi_port_get_number(efx); |
239 | if (rc < 0) | |
240 | goto fail3; | |
241 | efx->port_num = rc; | |
242 | ||
243 | rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); | |
244 | if (rc) | |
245 | goto fail3; | |
246 | ||
247 | rc = efx_ef10_get_sysclk_freq(efx); | |
248 | if (rc < 0) | |
249 | goto fail3; | |
250 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ | |
251 | ||
252 | /* Check whether firmware supports bug 35388 workaround */ | |
253 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); | |
254 | if (rc == 0) | |
255 | nic_data->workaround_35388 = true; | |
256 | else if (rc != -ENOSYS && rc != -ENOENT) | |
257 | goto fail3; | |
258 | netif_dbg(efx, probe, efx->net_dev, | |
259 | "workaround for bug 35388 is %sabled\n", | |
260 | nic_data->workaround_35388 ? "en" : "dis"); | |
261 | ||
262 | rc = efx_mcdi_mon_probe(efx); | |
263 | if (rc) | |
264 | goto fail3; | |
265 | ||
266 | return 0; |
267 | ||
268 | fail3: | |
269 | efx_mcdi_fini(efx); | |
270 | fail2: | |
271 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
272 | fail1: | |
273 | kfree(nic_data); | |
274 | efx->nic_data = NULL; | |
275 | return rc; | |
276 | } | |
277 | ||
278 | static int efx_ef10_free_vis(struct efx_nic *efx) | |
279 | { | |
280 | int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL); | |
281 | ||
282 | /* -EALREADY means nothing to free, so ignore */ | |
283 | if (rc == -EALREADY) | |
284 | rc = 0; | |
285 | return rc; | |
286 | } | |
287 | ||
288 | #ifdef EFX_USE_PIO |
289 | ||
290 | static void efx_ef10_free_piobufs(struct efx_nic *efx) | |
291 | { | |
292 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
293 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); | |
294 | unsigned int i; | |
295 | int rc; | |
296 | ||
297 | BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); | |
298 | ||
299 | for (i = 0; i < nic_data->n_piobufs; i++) { | |
300 | MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, | |
301 | nic_data->piobuf_handle[i]); | |
302 | rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), | |
303 | NULL, 0, NULL); | |
304 | WARN_ON(rc); | |
305 | } | |
306 | ||
307 | nic_data->n_piobufs = 0; | |
308 | } | |
309 | ||
310 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | |
311 | { | |
312 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
313 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); | |
314 | unsigned int i; | |
315 | size_t outlen; | |
316 | int rc = 0; | |
317 | ||
318 | BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); | |
319 | ||
320 | for (i = 0; i < n; i++) { | |
321 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, | |
322 | outbuf, sizeof(outbuf), &outlen); | |
323 | if (rc) | |
324 | break; | |
325 | if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { | |
326 | rc = -EIO; | |
327 | break; | |
328 | } | |
329 | nic_data->piobuf_handle[i] = | |
330 | MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); | |
331 | netif_dbg(efx, probe, efx->net_dev, | |
332 | "allocated PIO buffer %u handle %x\n", i, | |
333 | nic_data->piobuf_handle[i]); | |
334 | } | |
335 | ||
336 | nic_data->n_piobufs = i; | |
337 | if (rc) | |
338 | efx_ef10_free_piobufs(efx); | |
339 | return rc; | |
340 | } | |
341 | ||
342 | static int efx_ef10_link_piobufs(struct efx_nic *efx) | |
343 | { | |
344 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
345 | MCDI_DECLARE_BUF(inbuf, | |
346 | max(MC_CMD_LINK_PIOBUF_IN_LEN, | |
347 | MC_CMD_UNLINK_PIOBUF_IN_LEN)); | |
348 | struct efx_channel *channel; | |
349 | struct efx_tx_queue *tx_queue; | |
350 | unsigned int offset, index; | |
351 | int rc; | |
352 | ||
353 | BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); | |
354 | BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); | |
355 | ||
356 | /* Link a buffer to each VI in the write-combining mapping */ | |
357 | for (index = 0; index < nic_data->n_piobufs; ++index) { | |
358 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, | |
359 | nic_data->piobuf_handle[index]); | |
360 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, | |
361 | nic_data->pio_write_vi_base + index); | |
362 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, | |
363 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, | |
364 | NULL, 0, NULL); | |
365 | if (rc) { | |
366 | netif_err(efx, drv, efx->net_dev, | |
367 | "failed to link VI %u to PIO buffer %u (%d)\n", | |
368 | nic_data->pio_write_vi_base + index, index, | |
369 | rc); | |
370 | goto fail; | |
371 | } | |
372 | netif_dbg(efx, probe, efx->net_dev, | |
373 | "linked VI %u to PIO buffer %u\n", | |
374 | nic_data->pio_write_vi_base + index, index); | |
375 | } | |
376 | ||
377 | /* Link a buffer to each TX queue */ | |
378 | efx_for_each_channel(channel, efx) { | |
379 | efx_for_each_channel_tx_queue(tx_queue, channel) { | |
380 | /* We assign the PIO buffers to queues in | |
381 | * reverse order to allow for the following | |
382 | * special case. | |
383 | */ | |
384 | offset = ((efx->tx_channel_offset + efx->n_tx_channels - | |
385 | tx_queue->channel->channel - 1) * | |
386 | efx_piobuf_size); | |
387 | index = offset / ER_DZ_TX_PIOBUF_SIZE; | |
388 | offset = offset % ER_DZ_TX_PIOBUF_SIZE; | |
389 | ||
390 | /* When the host page size is 4K, the first | |
391 | * host page in the WC mapping may be within | |
392 | * the same VI page as the last TX queue. We | |
393 | * can only link one buffer to each VI. | |
394 | */ | |
395 | if (tx_queue->queue == nic_data->pio_write_vi_base) { | |
396 | BUG_ON(index != 0); | |
397 | rc = 0; | |
398 | } else { | |
399 | MCDI_SET_DWORD(inbuf, | |
400 | LINK_PIOBUF_IN_PIOBUF_HANDLE, | |
401 | nic_data->piobuf_handle[index]); | |
402 | MCDI_SET_DWORD(inbuf, | |
403 | LINK_PIOBUF_IN_TXQ_INSTANCE, | |
404 | tx_queue->queue); | |
405 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, | |
406 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, | |
407 | NULL, 0, NULL); | |
408 | } | |
409 | ||
410 | if (rc) { | |
411 | /* This is non-fatal; the TX path just | |
412 | * won't use PIO for this queue | |
413 | */ | |
414 | netif_err(efx, drv, efx->net_dev, | |
415 | "failed to link VI %u to PIO buffer %u (%d)\n", | |
416 | tx_queue->queue, index, rc); | |
417 | tx_queue->piobuf = NULL; | |
418 | } else { | |
419 | tx_queue->piobuf = | |
420 | nic_data->pio_write_base + | |
421 | index * EFX_VI_PAGE_SIZE + offset; | |
422 | tx_queue->piobuf_offset = offset; | |
423 | netif_dbg(efx, probe, efx->net_dev, | |
424 | "linked VI %u to PIO buffer %u offset %x addr %p\n", | |
425 | tx_queue->queue, index, | |
426 | tx_queue->piobuf_offset, | |
427 | tx_queue->piobuf); | |
428 | } | |
429 | } | |
430 | } | |
431 | ||
432 | return 0; | |
433 | ||
434 | fail: | |
435 | while (index--) { | |
436 | MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, | |
437 | nic_data->pio_write_vi_base + index); | |
438 | efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, | |
439 | inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, | |
440 | NULL, 0, NULL); | |
441 | } | |
442 | return rc; | |
443 | } | |
444 | ||
445 | #else /* !EFX_USE_PIO */ | |
446 | ||
447 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | |
448 | { | |
449 | return n == 0 ? 0 : -ENOBUFS; | |
450 | } | |
451 | ||
452 | static int efx_ef10_link_piobufs(struct efx_nic *efx) | |
453 | { | |
454 | return 0; | |
455 | } | |
456 | ||
457 | static void efx_ef10_free_piobufs(struct efx_nic *efx) | |
458 | { | |
459 | } | |
460 | ||
461 | #endif /* EFX_USE_PIO */ | |
462 | ||
463 | static void efx_ef10_remove(struct efx_nic *efx) |
464 | { | |
465 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
466 | int rc; | |
467 | ||
468 | efx_mcdi_mon_remove(efx); | |
469 | ||
470 | /* This needs to be after efx_ptp_remove_channel() with no filters */ | |
471 | efx_ef10_rx_free_indir_table(efx); | |
472 | ||
473 | if (nic_data->wc_membase) |
474 | iounmap(nic_data->wc_membase); | |
475 | ||
476 | rc = efx_ef10_free_vis(efx); |
477 | WARN_ON(rc != 0); | |
478 | ||
479 | if (!nic_data->must_restore_piobufs) |
480 | efx_ef10_free_piobufs(efx); | |
481 | ||
482 | efx_mcdi_fini(efx); |
483 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | |
484 | kfree(nic_data); | |
485 | } | |
486 | ||
487 | static int efx_ef10_alloc_vis(struct efx_nic *efx, | |
488 | unsigned int min_vis, unsigned int max_vis) | |
489 | { | |
490 | MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); | |
491 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); | |
492 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
493 | size_t outlen; | |
494 | int rc; | |
495 | ||
496 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); | |
497 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); | |
498 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), | |
499 | outbuf, sizeof(outbuf), &outlen); | |
500 | if (rc != 0) | |
501 | return rc; | |
502 | ||
503 | if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) | |
504 | return -EIO; | |
505 | ||
506 | netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", | |
507 | MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); | |
508 | ||
509 | nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); | |
510 | nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); | |
511 | return 0; | |
512 | } | |
513 | ||
514 | /* Note that the failure path of this function does not free |
515 | * resources, as this will be done by efx_ef10_remove(). | |
516 | */ | |
517 | static int efx_ef10_dimension_resources(struct efx_nic *efx) |
518 | { | |
519 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
520 | unsigned int uc_mem_map_size, wc_mem_map_size; | |
521 | unsigned int min_vis, pio_write_vi_base, max_vis; | |
522 | void __iomem *membase; | |
523 | int rc; | |
524 | ||
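/* Each VI provides an event queue, an RX queue and a TX queue, so we
 * need enough VIs to cover both the channels and all TX queues (hence
 * the max of the two below).
 */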
525 | min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | |
526 | ||
527 | #ifdef EFX_USE_PIO |
528 | /* Try to allocate PIO buffers if wanted and if the full | |
529 | * number of PIO buffers would be sufficient to allocate one | |
530 | * copy-buffer per TX channel. Failure is non-fatal, as there | |
531 | * are only a small number of PIO buffers shared between all | |
532 | * functions of the controller. | |
533 | */ | |
534 | if (efx_piobuf_size != 0 && | |
535 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= | |
536 | efx->n_tx_channels) { | |
537 | unsigned int n_piobufs = | |
538 | DIV_ROUND_UP(efx->n_tx_channels, | |
539 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); | |
540 | ||
541 | rc = efx_ef10_alloc_piobufs(efx, n_piobufs); | |
542 | if (rc) | |
543 | netif_err(efx, probe, efx->net_dev, | |
544 | "failed to allocate PIO buffers (%d)\n", rc); | |
545 | else | |
546 | netif_dbg(efx, probe, efx->net_dev, | |
547 | "allocated %u PIO buffers\n", n_piobufs); | |
548 | } | |
549 | #else | |
550 | nic_data->n_piobufs = 0; | |
551 | #endif | |
552 | ||
553 | /* PIO buffers should be mapped with write-combining enabled, | |
554 | * and we want to make single UC and WC mappings rather than | |
555 | * several of each (in fact that's the only option if host | |
556 | * page size is >4K). So we may allocate some extra VIs just | |
557 | * for writing PIO buffers through. | |
558 | */ | |
559 | uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + | |
560 | ER_DZ_TX_PIOBUF); | |
561 | if (nic_data->n_piobufs) { | |
562 | pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; | |
563 | wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + | |
564 | nic_data->n_piobufs) * | |
565 | EFX_VI_PAGE_SIZE) - | |
566 | uc_mem_map_size); | |
567 | max_vis = pio_write_vi_base + nic_data->n_piobufs; | |
568 | } else { | |
569 | pio_write_vi_base = 0; | |
570 | wc_mem_map_size = 0; | |
571 | max_vis = min_vis; | |
572 | } | |
573 | ||
574 | /* In case the last attached driver failed to free VIs, do it now */ | |
575 | rc = efx_ef10_free_vis(efx); | |
576 | if (rc != 0) | |
577 | return rc; | |
578 | ||
579 | rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); | |
580 | if (rc != 0) | |
581 | return rc; | |
582 | ||
583 | /* If we didn't get enough VIs to map all the PIO buffers, free the | |
584 | * PIO buffers | |
585 | */ | |
586 | if (nic_data->n_piobufs && | |
587 | nic_data->n_allocated_vis < | |
588 | pio_write_vi_base + nic_data->n_piobufs) { | |
589 | netif_dbg(efx, probe, efx->net_dev, | |
590 | "%u VIs are not sufficient to map %u PIO buffers\n", | |
591 | nic_data->n_allocated_vis, nic_data->n_piobufs); | |
592 | efx_ef10_free_piobufs(efx); | |
593 | } | |
594 | ||
595 | /* Shrink the original UC mapping of the memory BAR */ | |
596 | membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); | |
597 | if (!membase) { | |
598 | netif_err(efx, probe, efx->net_dev, | |
599 | "could not shrink memory BAR to %x\n", | |
600 | uc_mem_map_size); | |
601 | return -ENOMEM; | |
602 | } | |
603 | iounmap(efx->membase); | |
604 | efx->membase = membase; | |
605 | ||
606 | /* Set up the WC mapping if needed */ | |
607 | if (wc_mem_map_size) { | |
608 | nic_data->wc_membase = ioremap_wc(efx->membase_phys + | |
609 | uc_mem_map_size, | |
610 | wc_mem_map_size); | |
611 | if (!nic_data->wc_membase) { | |
612 | netif_err(efx, probe, efx->net_dev, | |
613 | "could not allocate WC mapping of size %x\n", | |
614 | wc_mem_map_size); | |
615 | return -ENOMEM; | |
616 | } | |
617 | nic_data->pio_write_vi_base = pio_write_vi_base; | |
618 | nic_data->pio_write_base = | |
619 | nic_data->wc_membase + | |
620 | (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - | |
621 | uc_mem_map_size); | |
622 | ||
623 | rc = efx_ef10_link_piobufs(efx); | |
624 | if (rc) | |
625 | efx_ef10_free_piobufs(efx); | |
626 | } | |
627 | ||
628 | netif_dbg(efx, probe, efx->net_dev, | |
629 | "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", | |
630 | &efx->membase_phys, efx->membase, uc_mem_map_size, | |
631 | nic_data->wc_membase, wc_mem_map_size); | |
632 | ||
633 | return 0; | |
634 | } |
635 | ||
636 | static int efx_ef10_init_nic(struct efx_nic *efx) | |
637 | { | |
638 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
639 | int rc; | |
640 | ||
641 | if (nic_data->must_check_datapath_caps) { |
642 | rc = efx_ef10_init_datapath_caps(efx); | |
643 | if (rc) | |
644 | return rc; | |
645 | nic_data->must_check_datapath_caps = false; | |
646 | } | |
647 | ||
648 | if (nic_data->must_realloc_vis) { |
649 | /* We cannot let the number of VIs change now */ | |
650 | rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, | |
651 | nic_data->n_allocated_vis); | |
652 | if (rc) | |
653 | return rc; | |
654 | nic_data->must_realloc_vis = false; | |
655 | } | |
656 | ||
657 | if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { |
658 | rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); | |
659 | if (rc == 0) { | |
660 | rc = efx_ef10_link_piobufs(efx); | |
661 | if (rc) | |
662 | efx_ef10_free_piobufs(efx); | |
663 | } | |
664 | ||
665 | /* Log an error on failure, but this is non-fatal */ | |
666 | if (rc) | |
667 | netif_err(efx, drv, efx->net_dev, | |
668 | "failed to restore PIO buffers (%d)\n", rc); | |
669 | nic_data->must_restore_piobufs = false; | |
670 | } | |
671 | ||
672 | efx_ef10_rx_push_indir_table(efx); |
673 | return 0; | |
674 | } | |
675 | ||
676 | static int efx_ef10_map_reset_flags(u32 *flags) | |
677 | { | |
678 | enum { | |
679 | EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << | |
680 | ETH_RESET_SHARED_SHIFT), | |
681 | EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | | |
682 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | | |
683 | ETH_RESET_PHY | ETH_RESET_MGMT) << | |
684 | ETH_RESET_SHARED_SHIFT) | |
685 | }; | |
686 | ||
687 | /* We assume for now that our PCI function is permitted to | |
688 | * reset everything. | |
689 | */ | |
690 | ||
691 | if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { | |
692 | *flags &= ~EF10_RESET_MC; | |
693 | return RESET_TYPE_WORLD; | |
694 | } | |
695 | ||
696 | if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { | |
697 | *flags &= ~EF10_RESET_PORT; | |
698 | return RESET_TYPE_ALL; | |
699 | } | |
700 | ||
701 | /* no invisible reset implemented */ | |
702 | ||
703 | return -EINVAL; | |
704 | } | |
705 | ||
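/* Each descriptor gives the ethtool name (NULL for stats not exposed
 * directly), the width of the counter in bits, and its byte offset
 * within the MC statistics DMA buffer.
 */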
706 | #define EF10_DMA_STAT(ext_name, mcdi_name) \ | |
707 | [EF10_STAT_ ## ext_name] = \ | |
708 | { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
709 | #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ | |
710 | [EF10_STAT_ ## int_name] = \ | |
711 | { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } | |
712 | #define EF10_OTHER_STAT(ext_name) \ | |
713 | [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } | |
714 | ||
715 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { | |
716 | EF10_DMA_STAT(tx_bytes, TX_BYTES), | |
717 | EF10_DMA_STAT(tx_packets, TX_PKTS), | |
718 | EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), | |
719 | EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), | |
720 | EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), | |
721 | EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), | |
722 | EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), | |
723 | EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), | |
724 | EF10_DMA_STAT(tx_64, TX_64_PKTS), | |
725 | EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), | |
726 | EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), | |
727 | EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), | |
728 | EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), | |
729 | EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), | |
730 | EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), | |
731 | EF10_DMA_STAT(rx_bytes, RX_BYTES), | |
732 | EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), | |
733 | EF10_OTHER_STAT(rx_good_bytes), | |
734 | EF10_OTHER_STAT(rx_bad_bytes), | |
735 | EF10_DMA_STAT(rx_packets, RX_PKTS), | |
736 | EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), | |
737 | EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), | |
738 | EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), | |
739 | EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), | |
740 | EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), | |
741 | EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), | |
742 | EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), | |
743 | EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), | |
744 | EF10_DMA_STAT(rx_64, RX_64_PKTS), | |
745 | EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), | |
746 | EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), | |
747 | EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), | |
748 | EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), | |
749 | EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), | |
750 | EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), | |
751 | EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), | |
752 | EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), | |
753 | EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), | |
754 | EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), | |
755 | EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), | |
756 | EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), | |
757 | EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
758 | EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), | |
759 | EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), | |
760 | EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), | |
761 | EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), | |
762 | EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), | |
763 | EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), | |
764 | EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), | |
765 | EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), | |
766 | EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), | |
767 | EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), | |
768 | EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), | |
769 | }; |
770 | ||
771 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ | |
772 | (1ULL << EF10_STAT_tx_packets) | \ | |
773 | (1ULL << EF10_STAT_tx_pause) | \ | |
774 | (1ULL << EF10_STAT_tx_unicast) | \ | |
775 | (1ULL << EF10_STAT_tx_multicast) | \ | |
776 | (1ULL << EF10_STAT_tx_broadcast) | \ | |
777 | (1ULL << EF10_STAT_rx_bytes) | \ | |
778 | (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ | |
779 | (1ULL << EF10_STAT_rx_good_bytes) | \ | |
780 | (1ULL << EF10_STAT_rx_bad_bytes) | \ | |
781 | (1ULL << EF10_STAT_rx_packets) | \ | |
782 | (1ULL << EF10_STAT_rx_good) | \ | |
783 | (1ULL << EF10_STAT_rx_bad) | \ | |
784 | (1ULL << EF10_STAT_rx_pause) | \ | |
785 | (1ULL << EF10_STAT_rx_control) | \ | |
786 | (1ULL << EF10_STAT_rx_unicast) | \ | |
787 | (1ULL << EF10_STAT_rx_multicast) | \ | |
788 | (1ULL << EF10_STAT_rx_broadcast) | \ | |
789 | (1ULL << EF10_STAT_rx_lt64) | \ | |
790 | (1ULL << EF10_STAT_rx_64) | \ | |
791 | (1ULL << EF10_STAT_rx_65_to_127) | \ | |
792 | (1ULL << EF10_STAT_rx_128_to_255) | \ | |
793 | (1ULL << EF10_STAT_rx_256_to_511) | \ | |
794 | (1ULL << EF10_STAT_rx_512_to_1023) | \ | |
795 | (1ULL << EF10_STAT_rx_1024_to_15xx) | \ | |
796 | (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ | |
797 | (1ULL << EF10_STAT_rx_gtjumbo) | \ | |
798 | (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ | |
799 | (1ULL << EF10_STAT_rx_overflow) | \ | |
800 | (1ULL << EF10_STAT_rx_nodesc_drops)) | |
801 | ||
802 | /* These statistics are only provided by the 10G MAC. For a 10G/40G | |
803 | * switchable port we do not expose these because they might not | |
804 | * include all the packets they should. | |
805 | */ | |
806 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ | |
807 | (1ULL << EF10_STAT_tx_lt64) | \ | |
808 | (1ULL << EF10_STAT_tx_64) | \ | |
809 | (1ULL << EF10_STAT_tx_65_to_127) | \ | |
810 | (1ULL << EF10_STAT_tx_128_to_255) | \ | |
811 | (1ULL << EF10_STAT_tx_256_to_511) | \ | |
812 | (1ULL << EF10_STAT_tx_512_to_1023) | \ | |
813 | (1ULL << EF10_STAT_tx_1024_to_15xx) | \ | |
814 | (1ULL << EF10_STAT_tx_15xx_to_jumbo)) | |
815 | ||
816 | /* These statistics are only provided by the 40G MAC. For a 10G/40G | |
817 | * switchable port we do expose these because the errors will otherwise | |
818 | * be silent. | |
819 | */ | |
820 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ | |
821 | (1ULL << EF10_STAT_rx_length_error)) | |
822 | ||
823 | /* These statistics are only provided if the firmware supports the |
824 | * capability PM_AND_RXDP_COUNTERS. | |
825 | */ | |
826 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ | |
827 | (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ | |
828 | (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ | |
829 | (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ | |
830 | (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ | |
831 | (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ | |
832 | (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ | |
833 | (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ | |
834 | (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ | |
835 | (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ | |
836 | (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ | |
837 | (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ | |
838 | (1ULL << EF10_STAT_rx_dp_emerg_wait)) | |
839 | ||
840 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) | |
841 | { | |
842 | u64 raw_mask = HUNT_COMMON_STAT_MASK; | |
843 | u32 port_caps = efx_mcdi_phy_get_caps(efx); | |
844 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
845 | |
846 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | |
847 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; | |
848 | else | |
849 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; | |
850 | |
851 | if (nic_data->datapath_caps & | |
852 | (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) | |
853 | raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; | |
854 | ||
855 | return raw_mask; |
856 | } | |
857 | ||
858 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) | |
859 | { | |
860 | u64 raw_mask = efx_ef10_raw_stat_mask(efx); | |
861 | ||
862 | #if BITS_PER_LONG == 64 | |
863 | mask[0] = raw_mask; | |
864 | #else | |
865 | mask[0] = raw_mask & 0xffffffff; | |
866 | mask[1] = raw_mask >> 32; | |
867 | #endif | |
868 | } |
869 | ||
870 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) | |
871 | { | |
872 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
873 | ||
874 | efx_ef10_get_stat_mask(efx, mask); | |
875 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, | |
876 | mask, names); | |
877 | } |
878 | ||
879 | static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) | |
880 | { | |
881 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
882 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | |
883 | __le64 generation_start, generation_end; |
884 | u64 *stats = nic_data->stats; | |
885 | __le64 *dma_stats; | |
886 | ||
887 | efx_ef10_get_stat_mask(efx, mask); |
888 | ||
889 | dma_stats = efx->stats_buffer.addr; |
890 | nic_data = efx->nic_data; | |
891 | ||
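/* The MC writes matching generation counts at the start and end of the
 * stats buffer around each DMA; if the two values read here differ, a
 * DMA was in progress and the caller must retry.
 */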
892 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | |
893 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) | |
894 | return 0; | |
895 | rmb(); | |
896 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, | |
897 | stats, efx->stats_buffer.addr, false); | |
898 | rmb(); | |
899 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
900 | if (generation_end != generation_start) | |
901 | return -EAGAIN; | |
902 | ||
903 | /* Update derived statistics */ | |
904 | stats[EF10_STAT_rx_good_bytes] = | |
905 | stats[EF10_STAT_rx_bytes] - | |
906 | stats[EF10_STAT_rx_bytes_minus_good_bytes]; | |
907 | efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], | |
908 | stats[EF10_STAT_rx_bytes_minus_good_bytes]); | |
909 | ||
910 | return 0; | |
911 | } | |
912 | ||
913 | ||
914 | static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, | |
915 | struct rtnl_link_stats64 *core_stats) | |
916 | { | |
917 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | |
918 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
919 | u64 *stats = nic_data->stats; | |
920 | size_t stats_count = 0, index; | |
921 | int retry; | |
922 | ||
923 | efx_ef10_get_stat_mask(efx, mask); |
924 | ||
925 | /* If we're unlucky enough to read statistics during the DMA, wait |
926 | * up to 10ms for it to finish (typically takes <500us) | |
927 | */ | |
928 | for (retry = 0; retry < 100; ++retry) { | |
929 | if (efx_ef10_try_update_nic_stats(efx) == 0) | |
930 | break; | |
931 | udelay(100); | |
932 | } | |
933 | ||
934 | if (full_stats) { | |
935 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | |
936 | if (efx_ef10_stat_desc[index].name) { | |
937 | *full_stats++ = stats[index]; | |
938 | ++stats_count; | |
939 | } | |
940 | } | |
941 | } | |
942 | ||
943 | if (core_stats) { | |
944 | core_stats->rx_packets = stats[EF10_STAT_rx_packets]; | |
945 | core_stats->tx_packets = stats[EF10_STAT_tx_packets]; | |
946 | core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; | |
947 | core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; | |
948 | core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops]; | |
949 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; | |
950 | core_stats->rx_length_errors = | |
951 | stats[EF10_STAT_rx_gtjumbo] + | |
952 | stats[EF10_STAT_rx_length_error]; | |
953 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | |
954 | core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; | |
955 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | |
956 | core_stats->rx_errors = (core_stats->rx_length_errors + | |
957 | core_stats->rx_crc_errors + | |
958 | core_stats->rx_frame_errors); | |
959 | } | |
960 | ||
961 | return stats_count; | |
962 | } | |
963 | ||
964 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) | |
965 | { | |
966 | struct efx_nic *efx = channel->efx; | |
967 | unsigned int mode, value; | |
968 | efx_dword_t timer_cmd; | |
969 | ||
970 | if (channel->irq_moderation) { | |
971 | mode = 3; | |
972 | value = channel->irq_moderation - 1; | |
973 | } else { | |
974 | mode = 0; | |
975 | value = 0; | |
976 | } | |
977 | ||
978 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
979 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, | |
980 | EFE_DD_EVQ_IND_TIMER_FLAGS, | |
981 | ERF_DD_EVQ_IND_TIMER_MODE, mode, | |
982 | ERF_DD_EVQ_IND_TIMER_VAL, value); | |
983 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, | |
984 | channel->channel); | |
985 | } else { | |
986 | EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, | |
987 | ERF_DZ_TC_TIMER_VAL, value); | |
988 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, | |
989 | channel->channel); | |
990 | } | |
991 | } | |
992 | ||
993 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | |
994 | { | |
995 | wol->supported = 0; | |
996 | wol->wolopts = 0; | |
997 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | |
998 | } | |
999 | ||
1000 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) | |
1001 | { | |
1002 | if (type != 0) | |
1003 | return -EINVAL; | |
1004 | return 0; | |
1005 | } | |
1006 | ||
1007 | static void efx_ef10_mcdi_request(struct efx_nic *efx, | |
1008 | const efx_dword_t *hdr, size_t hdr_len, | |
1009 | const efx_dword_t *sdu, size_t sdu_len) | |
1010 | { | |
1011 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1012 | u8 *pdu = nic_data->mcdi_buf.addr; | |
1013 | ||
1014 | memcpy(pdu, hdr, hdr_len); | |
1015 | memcpy(pdu + hdr_len, sdu, sdu_len); | |
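/* Ensure the copied request is visible in the shared MCDI buffer before
 * the doorbell writes below hand it to the MC.
 */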
1016 | wmb(); | |
1017 | ||
1018 | /* The hardware provides 'low' and 'high' (doorbell) registers | |
1019 | * for passing the 64-bit address of an MCDI request to | |
1020 | * firmware. However the dwords are swapped by firmware. The | |
1021 | * least significant bits of the doorbell are then 0 for all | |
1022 | * MCDI requests due to alignment. | |
1023 | */ | |
1024 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), | |
1025 | ER_DZ_MC_DB_LWRD); | |
1026 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), | |
1027 | ER_DZ_MC_DB_HWRD); | |
1028 | } | |
1029 | ||
1030 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) | |
1031 | { | |
1032 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1033 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; | |
1034 | ||
1035 | rmb(); | |
1036 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); | |
1037 | } | |
1038 | ||
1039 | static void | |
1040 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, | |
1041 | size_t offset, size_t outlen) | |
1042 | { | |
1043 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1044 | const u8 *pdu = nic_data->mcdi_buf.addr; | |
1045 | ||
1046 | memcpy(outbuf, pdu + offset, outlen); | |
1047 | } | |
1048 | ||
1049 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) | |
1050 | { | |
1051 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1052 | int rc; | |
1053 | ||
1054 | rc = efx_ef10_get_warm_boot_count(efx); | |
1055 | if (rc < 0) { | |
1056 | /* The firmware is presumably in the process of | |
1057 | * rebooting. However, we are supposed to report each | |
1058 | * reboot just once, so we must only do that once we | |
1059 | * can read and store the updated warm boot count. | |
1060 | */ | |
1061 | return 0; | |
1062 | } | |
1063 | ||
1064 | if (rc == nic_data->warm_boot_count) | |
1065 | return 0; | |
1066 | ||
1067 | nic_data->warm_boot_count = rc; | |
1068 | ||
1069 | /* All our allocations have been reset */ | |
1070 | nic_data->must_realloc_vis = true; | |
1071 | nic_data->must_restore_filters = true; | |
1072 | nic_data->must_restore_piobufs = true; | |
1073 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
1074 | ||
1075 | /* The datapath firmware might have been changed */ |
1076 | nic_data->must_check_datapath_caps = true; | |
1077 | ||
1078 | /* MAC statistics have been cleared on the NIC; clear the local |
1079 | * statistic that we update with efx_update_diff_stat(). | |
1080 | */ | |
1081 | nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; | |
1082 | ||
1083 | return -EIO; |
1084 | } | |
1085 | ||
1086 | /* Handle an MSI interrupt | |
1087 | * | |
1088 | * Handle an MSI hardware interrupt. This routine schedules event | |
1089 | * queue processing. No interrupt acknowledgement cycle is necessary. | |
1090 | * Also, we never need to check that the interrupt is for us, since | |
1091 | * MSI interrupts cannot be shared. | |
1092 | */ | |
1093 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |
1094 | { | |
1095 | struct efx_msi_context *context = dev_id; | |
1096 | struct efx_nic *efx = context->efx; | |
1097 | ||
1098 | netif_vdbg(efx, intr, efx->net_dev, | |
1099 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | |
1100 | ||
1101 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | |
1102 | /* Note test interrupts */ | |
1103 | if (context->index == efx->irq_level) | |
1104 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1105 | ||
1106 | /* Schedule processing of the channel */ | |
1107 | efx_schedule_channel_irq(efx->channel[context->index]); | |
1108 | } | |
1109 | ||
1110 | return IRQ_HANDLED; | |
1111 | } | |
1112 | ||
1113 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | |
1114 | { | |
1115 | struct efx_nic *efx = dev_id; | |
1116 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | |
1117 | struct efx_channel *channel; | |
1118 | efx_dword_t reg; | |
1119 | u32 queues; | |
1120 | ||
1121 | /* Read the ISR which also ACKs the interrupts */ | |
1122 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); | |
1123 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); | |
1124 | ||
1125 | if (queues == 0) | |
1126 | return IRQ_NONE; | |
1127 | ||
1128 | if (likely(soft_enabled)) { | |
1129 | /* Note test interrupts */ | |
1130 | if (queues & (1U << efx->irq_level)) | |
1131 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1132 | ||
1133 | efx_for_each_channel(channel, efx) { | |
1134 | if (queues & 1) | |
1135 | efx_schedule_channel_irq(channel); | |
1136 | queues >>= 1; | |
1137 | } | |
1138 | } | |
1139 | ||
1140 | netif_vdbg(efx, intr, efx->net_dev, | |
1141 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | |
1142 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | |
1143 | ||
1144 | return IRQ_HANDLED; | |
1145 | } | |
1146 | ||
1147 | static void efx_ef10_irq_test_generate(struct efx_nic *efx) | |
1148 | { | |
1149 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); | |
1150 | ||
1151 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); | |
1152 | ||
1153 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); | |
1154 | (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, | |
1155 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
1156 | } | |
1157 | ||
1158 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) | |
1159 | { | |
1160 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, | |
1161 | (tx_queue->ptr_mask + 1) * | |
1162 | sizeof(efx_qword_t), | |
1163 | GFP_KERNEL); | |
1164 | } | |
1165 | ||
1166 | /* This writes to the TX_DESC_WPTR and also pushes data */ | |
1167 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, | |
1168 | const efx_qword_t *txd) | |
1169 | { | |
1170 | unsigned int write_ptr; | |
1171 | efx_oword_t reg; | |
1172 | ||
1173 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1174 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); | |
1175 | reg.qword[0] = *txd; | |
1176 | efx_writeo_page(tx_queue->efx, ®, | |
1177 | ER_DZ_TX_DESC_UPD, tx_queue->queue); | |
1178 | } | |
1179 | ||
1180 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) | |
1181 | { | |
1182 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
1183 | EFX_BUF_SIZE)); | |
1184 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); | |
1185 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | |
1186 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; | |
1187 | struct efx_channel *channel = tx_queue->channel; | |
1188 | struct efx_nic *efx = tx_queue->efx; | |
1189 | size_t inlen, outlen; | |
1190 | dma_addr_t dma_addr; | |
1191 | efx_qword_t *txd; | |
1192 | int rc; | |
1193 | int i; | |
1194 | ||
1195 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); | |
1196 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); | |
1197 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); | |
1198 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); | |
1199 | MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, | |
1200 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, | |
1201 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); | |
1202 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); | |
1203 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1204 | ||
1205 | dma_addr = tx_queue->txd.buf.dma_addr; | |
1206 | ||
1207 | netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", | |
1208 | tx_queue->queue, entries, (u64)dma_addr); | |
1209 | ||
1210 | for (i = 0; i < entries; ++i) { | |
1211 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); | |
1212 | dma_addr += EFX_BUF_SIZE; | |
1213 | } | |
1214 | ||
1215 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); | |
1216 | ||
1217 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, | |
1218 | outbuf, sizeof(outbuf), &outlen); | |
1219 | if (rc) | |
1220 | goto fail; | |
1221 | ||
1222 | /* A previous user of this TX queue might have set us up the | |
1223 | * bomb by writing a descriptor to the TX push collector but | |
1224 | * not the doorbell. (Each collector belongs to a port, not a | |
1225 | * queue or function, so cannot easily be reset.) We must | |
1226 | * attempt to push a no-op descriptor in its place. | |
1227 | */ | |
1228 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; | |
1229 | tx_queue->insert_count = 1; | |
1230 | txd = efx_tx_desc(tx_queue, 0); | |
1231 | EFX_POPULATE_QWORD_4(*txd, | |
1232 | ESF_DZ_TX_DESC_IS_OPT, true, | |
1233 | ESF_DZ_TX_OPTION_TYPE, | |
1234 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, | |
1235 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, | |
1236 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); | |
1237 | tx_queue->write_count = 1; | |
1238 | wmb(); | |
1239 | efx_ef10_push_tx_desc(tx_queue, txd); | |
1240 | ||
1241 | return; | |
1242 | ||
1243 | fail: | |
1244 | WARN_ON(true); | |
1245 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1246 | } | |
1247 | ||
1248 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) | |
1249 | { | |
1250 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); | |
1251 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); | |
1252 | struct efx_nic *efx = tx_queue->efx; | |
1253 | size_t outlen; | |
1254 | int rc; | |
1255 | ||
1256 | MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, | |
1257 | tx_queue->queue); | |
1258 | ||
1259 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), | |
1260 | outbuf, sizeof(outbuf), &outlen); | |
1261 | ||
1262 | if (rc && rc != -EALREADY) | |
1263 | goto fail; | |
1264 | ||
1265 | return; | |
1266 | ||
1267 | fail: | |
1268 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1269 | } | |
1270 | ||
1271 | static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) | |
1272 | { | |
1273 | efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); | |
1274 | } | |
1275 | ||
1276 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | |
1277 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) | |
1278 | { | |
1279 | unsigned int write_ptr; | |
1280 | efx_dword_t reg; | |
1281 | ||
1282 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1283 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); | |
1284 | efx_writed_page(tx_queue->efx, ®, | |
1285 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); | |
1286 | } | |
1287 | ||
1288 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) | |
1289 | { | |
1290 | unsigned int old_write_count = tx_queue->write_count; | |
1291 | struct efx_tx_buffer *buffer; | |
1292 | unsigned int write_ptr; | |
1293 | efx_qword_t *txd; | |
1294 | ||
1295 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | |
1296 | ||
1297 | do { | |
1298 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | |
1299 | buffer = &tx_queue->buffer[write_ptr]; | |
1300 | txd = efx_tx_desc(tx_queue, write_ptr); | |
1301 | ++tx_queue->write_count; | |
1302 | ||
1303 | /* Create TX descriptor ring entry */ | |
1304 | if (buffer->flags & EFX_TX_BUF_OPTION) { | |
1305 | *txd = buffer->option; | |
1306 | } else { | |
1307 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); | |
1308 | EFX_POPULATE_QWORD_3( | |
1309 | *txd, | |
1310 | ESF_DZ_TX_KER_CONT, | |
1311 | buffer->flags & EFX_TX_BUF_CONT, | |
1312 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, | |
1313 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); | |
1314 | } | |
1315 | } while (tx_queue->write_count != tx_queue->insert_count); | |
1316 | ||
1317 | wmb(); /* Ensure descriptors are written before they are fetched */ | |
1318 | ||
1319 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { | |
1320 | txd = efx_tx_desc(tx_queue, | |
1321 | old_write_count & tx_queue->ptr_mask); | |
1322 | efx_ef10_push_tx_desc(tx_queue, txd); | |
1323 | ++tx_queue->pushes; | |
1324 | } else { | |
1325 | efx_ef10_notify_tx_desc(tx_queue); | |
1326 | } | |
1327 | } | |
1328 | ||
1329 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) | |
1330 | { | |
1331 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); | |
1332 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); | |
1333 | size_t outlen; | |
1334 | int rc; | |
1335 | ||
1336 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, | |
1337 | EVB_PORT_ID_ASSIGNED); | |
1338 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, | |
1339 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); | |
1340 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, | |
1341 | EFX_MAX_CHANNELS); | |
1342 | ||
1343 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), | |
1344 | outbuf, sizeof(outbuf), &outlen); | |
1345 | if (rc != 0) | |
1346 | return rc; | |
1347 | ||
1348 | if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) | |
1349 | return -EIO; | |
1350 | ||
1351 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); | |
1352 | ||
1353 | return 0; | |
1354 | } | |
1355 | ||
1356 | static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) | |
1357 | { | |
1358 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); | |
1359 | int rc; | |
1360 | ||
1361 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, | |
1362 | context); | |
1363 | ||
1364 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), | |
1365 | NULL, 0, NULL); | |
1366 | WARN_ON(rc != 0); | |
1367 | } | |
1368 | ||
1369 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) | |
1370 | { | |
1371 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); | |
1372 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); | |
1373 | int i, rc; | |
1374 | ||
1375 | MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, | |
1376 | context); | |
1377 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | |
1378 | MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); | |
1379 | ||
1380 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) | |
1381 | MCDI_PTR(tablebuf, | |
1382 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = | |
1383 | (u8) efx->rx_indir_table[i]; | |
1384 | ||
1385 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, | |
1386 | sizeof(tablebuf), NULL, 0, NULL); | |
1387 | if (rc != 0) | |
1388 | return rc; | |
1389 | ||
1390 | MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, | |
1391 | context); | |
1392 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != | |
1393 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); | |
1394 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) | |
1395 | MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = | |
1396 | efx->rx_hash_key[i]; | |
1397 | ||
1398 | return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, | |
1399 | sizeof(keybuf), NULL, 0, NULL); | |
1400 | } | |
1401 | ||
1402 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) | |
1403 | { | |
1404 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1405 | ||
1406 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) | |
1407 | efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); | |
1408 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | |
1409 | } | |
1410 | ||
1411 | static void efx_ef10_rx_push_indir_table(struct efx_nic *efx) | |
1412 | { | |
1413 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
1414 | int rc; | |
1415 | ||
1416 | netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n"); | |
1417 | ||
1418 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { | |
1419 | rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); | |
1420 | if (rc != 0) | |
1421 | goto fail; | |
1422 | } | |
1423 | ||
1424 | rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); | |
1425 | if (rc != 0) | |
1426 | goto fail; | |
1427 | ||
1428 | return; | |
1429 | ||
1430 | fail: | |
1431 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1432 | } | |
1433 | ||
1434 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) | |
1435 | { | |
1436 | return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, | |
1437 | (rx_queue->ptr_mask + 1) * | |
1438 | sizeof(efx_qword_t), | |
1439 | GFP_KERNEL); | |
1440 | } | |
1441 | ||
1442 | static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |
1443 | { | |
1444 | MCDI_DECLARE_BUF(inbuf, | |
1445 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | |
1446 | EFX_BUF_SIZE)); | |
1447 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); | |
1448 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1449 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; | |
1450 | struct efx_nic *efx = rx_queue->efx; | |
1451 | size_t inlen, outlen; | |
1452 | dma_addr_t dma_addr; | |
1453 | int rc; | |
1454 | int i; | |
1455 | ||
1456 | rx_queue->scatter_n = 0; | |
1457 | rx_queue->scatter_len = 0; | |
1458 | ||
1459 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); | |
1460 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); | |
1461 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); | |
1462 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, | |
1463 | efx_rx_queue_index(rx_queue)); | |
1464 | MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS, | |
1465 | INIT_RXQ_IN_FLAG_PREFIX, 1); | |
1466 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); | |
1467 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
1468 | ||
1469 | dma_addr = rx_queue->rxd.buf.dma_addr; | |
1470 | ||
1471 | netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", | |
1472 | efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); | |
1473 | ||
1474 | for (i = 0; i < entries; ++i) { | |
1475 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); | |
1476 | dma_addr += EFX_BUF_SIZE; | |
1477 | } | |
1478 | ||
1479 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); | |
1480 | ||
1481 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, | |
1482 | outbuf, sizeof(outbuf), &outlen); | |
1483 | if (rc) | |
1484 | goto fail; | |
1485 | ||
1486 | return; | |
1487 | ||
1488 | fail: | |
1489 | WARN_ON(true); | |
1490 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1491 | } | |
1492 | ||
1493 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) | |
1494 | { | |
1495 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); | |
1496 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); | |
1497 | struct efx_nic *efx = rx_queue->efx; | |
1498 | size_t outlen; | |
1499 | int rc; | |
1500 | ||
1501 | MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, | |
1502 | efx_rx_queue_index(rx_queue)); | |
1503 | ||
1504 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), | |
1505 | outbuf, sizeof(outbuf), &outlen); | |
1506 | ||
1507 | if (rc && rc != -EALREADY) | |
1508 | goto fail; | |
1509 | ||
1510 | return; | |
1511 | ||
1512 | fail: | |
1513 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1514 | } | |
1515 | ||
1516 | static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) | |
1517 | { | |
1518 | efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); | |
1519 | } | |
1520 | ||
1521 | /* This creates an entry in the RX descriptor queue */ | |
1522 | static inline void | |
1523 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | |
1524 | { | |
1525 | struct efx_rx_buffer *rx_buf; | |
1526 | efx_qword_t *rxd; | |
1527 | ||
1528 | rxd = efx_rx_desc(rx_queue, index); | |
1529 | rx_buf = efx_rx_buffer(rx_queue, index); | |
1530 | EFX_POPULATE_QWORD_2(*rxd, | |
1531 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, | |
1532 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | |
1533 | } | |
1534 | ||
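/* Write out newly added RX descriptors and ring the doorbell.  The write
 * pointer may only be advanced in multiples of 8, so any remainder stays
 * unpublished until a later call has added more buffers.
 */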
1535 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) | |
1536 | { | |
1537 | struct efx_nic *efx = rx_queue->efx; | |
1538 | unsigned int write_count; | |
1539 | efx_dword_t reg; | |
1540 | ||
1541 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ | |
1542 | write_count = rx_queue->added_count & ~7; | |
1543 | if (rx_queue->notified_count == write_count) | |
1544 | return; | |
1545 | ||
1546 | do | |
1547 | efx_ef10_build_rx_desc( | |
1548 | rx_queue, | |
1549 | rx_queue->notified_count & rx_queue->ptr_mask); | |
1550 | while (++rx_queue->notified_count != write_count); | |
1551 | ||
1552 | wmb(); | |
1553 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, | |
1554 | write_count & rx_queue->ptr_mask); | |
1555 | efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD, | |
1556 | efx_rx_queue_index(rx_queue)); | |
1557 | } | |
1558 | ||
1559 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; | |
1560 | ||
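/* Ask the MC to post a driver-generated EFX_EF10_REFILL event on this
 * channel's event queue; the actual refill then happens from event
 * processing context (see efx_ef10_handle_driver_generated_event()).
 */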
1561 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) | |
1562 | { | |
1563 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | |
1564 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
1565 | efx_qword_t event; | |
1566 | ||
1567 | EFX_POPULATE_QWORD_2(event, | |
1568 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
1569 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); | |
1570 | ||
1571 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
1572 | ||
1573 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
1574 | * already swapped the data to little-endian order. | |
1575 | */ | |
1576 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
1577 | sizeof(efx_qword_t)); | |
1578 | ||
1579 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, | |
1580 | inbuf, sizeof(inbuf), 0, | |
1581 | efx_ef10_rx_defer_refill_complete, 0); | |
1582 | } | |
1583 | ||
1584 | static void | |
1585 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, | |
1586 | int rc, efx_dword_t *outbuf, | |
1587 | size_t outlen_actual) | |
1588 | { | |
1589 | /* nothing to do */ | |
1590 | } | |
1591 | ||
1592 | static int efx_ef10_ev_probe(struct efx_channel *channel) | |
1593 | { | |
1594 | return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, | |
1595 | (channel->eventq_mask + 1) * | |
1596 | sizeof(efx_qword_t), | |
1597 | GFP_KERNEL); | |
1598 | } | |
1599 | ||
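/* Create the event queue via MC_CMD_INIT_EVQ.  The queue memory is passed
 * to the MC as a list of EFX_BUF_SIZE-sized DMA addresses, and the ring is
 * pre-filled with all-ones so that unwritten entries never look like valid
 * events.
 */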
1600 | static int efx_ef10_ev_init(struct efx_channel *channel) | |
1601 | { | |
1602 | MCDI_DECLARE_BUF(inbuf, | |
1603 | MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / | |
1604 | EFX_BUF_SIZE)); | |
1605 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); | |
1606 | size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; | |
1607 | struct efx_nic *efx = channel->efx; | |
1608 | struct efx_ef10_nic_data *nic_data; | |
1609 | bool supports_rx_merge; | |
1610 | size_t inlen, outlen; | |
1611 | dma_addr_t dma_addr; | |
1612 | int rc; | |
1613 | int i; | |
1614 | ||
1615 | nic_data = efx->nic_data; | |
1616 | supports_rx_merge = | |
1617 | !!(nic_data->datapath_caps & | |
1618 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); | |
1619 | ||
1620 | /* Fill event queue with all ones (i.e. empty events) */ | |
1621 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); | |
1622 | ||
1623 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); | |
1624 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); | |
1625 | /* INIT_EVQ expects index in vector table, not absolute */ | |
1626 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); | |
1627 | MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, | |
1628 | INIT_EVQ_IN_FLAG_INTERRUPTING, 1, | |
1629 | INIT_EVQ_IN_FLAG_RX_MERGE, 1, | |
1630 | INIT_EVQ_IN_FLAG_TX_MERGE, 1, | |
1631 | INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); | |
1632 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, | |
1633 | MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); | |
1634 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); | |
1635 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); | |
1636 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, | |
1637 | MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); | |
1638 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); | |
1639 | ||
1640 | dma_addr = channel->eventq.buf.dma_addr; | |
1641 | for (i = 0; i < entries; ++i) { | |
1642 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); | |
1643 | dma_addr += EFX_BUF_SIZE; | |
1644 | } | |
1645 | ||
1646 | inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); | |
1647 | ||
1648 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, | |
1649 | outbuf, sizeof(outbuf), &outlen); | |
1650 | if (rc) | |
1651 | goto fail; | |
1652 | ||
1653 | /* IRQ return is ignored */ | |
1654 | ||
1655 | return 0; | |
1656 | ||
1657 | fail: | |
1658 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1659 | return rc; | |
1660 | } | |
1661 | ||
1662 | static void efx_ef10_ev_fini(struct efx_channel *channel) | |
1663 | { | |
1664 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); | |
1665 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); | |
1666 | struct efx_nic *efx = channel->efx; | |
1667 | size_t outlen; | |
1668 | int rc; | |
1669 | ||
1670 | MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); | |
1671 | ||
1672 | rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), | |
1673 | outbuf, sizeof(outbuf), &outlen); | |
1674 | ||
1675 | if (rc && rc != -EALREADY) | |
1676 | goto fail; | |
1677 | ||
1678 | return; | |
1679 | ||
1680 | fail: | |
1681 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
1682 | } | |
1683 | ||
1684 | static void efx_ef10_ev_remove(struct efx_channel *channel) | |
1685 | { | |
1686 | efx_nic_free_buffer(channel->efx, &channel->eventq.buf); | |
1687 | } | |
1688 | ||
1689 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, | |
1690 | unsigned int rx_queue_label) | |
1691 | { | |
1692 | struct efx_nic *efx = rx_queue->efx; | |
1693 | ||
1694 | netif_info(efx, hw, efx->net_dev, | |
1695 | "rx event arrived on queue %d labeled as queue %u\n", | |
1696 | efx_rx_queue_index(rx_queue), rx_queue_label); | |
1697 | ||
1698 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1699 | } | |
1700 | ||
1701 | static void | |
1702 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, | |
1703 | unsigned int actual, unsigned int expected) | |
1704 | { | |
1705 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; | |
1706 | struct efx_nic *efx = rx_queue->efx; | |
1707 | ||
1708 | netif_info(efx, hw, efx->net_dev, | |
1709 | "dropped %d events (index=%d expected=%d)\n", | |
1710 | dropped, actual, expected); | |
1711 | ||
1712 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1713 | } | |
1714 | ||
1715 | /* A partially received packet was aborted; clean up. */ | |
1716 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) | |
1717 | { | |
1718 | unsigned int rx_desc_ptr; | |
1719 | ||
1720 | WARN_ON(rx_queue->scatter_n == 0); | |
1721 | ||
1722 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, | |
1723 | "scattered RX aborted (dropping %u buffers)\n", | |
1724 | rx_queue->scatter_n); | |
1725 | ||
1726 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; | |
1727 | ||
1728 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, | |
1729 | 0, EFX_RX_PKT_DISCARD); | |
1730 | ||
1731 | rx_queue->removed_count += rx_queue->scatter_n; | |
1732 | rx_queue->scatter_n = 0; | |
1733 | rx_queue->scatter_len = 0; | |
1734 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; | |
1735 | } | |
1736 | ||
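/* Handle an RX completion event.  The event carries the low bits of the
 * next descriptor pointer; the difference from removed_count gives the
 * number of descriptors covered.  A single event may describe either one
 * (possibly scattered) packet or a merged batch of non-scattered packets,
 * and the two cases are distinguished below.
 */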
1737 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |
1738 | const efx_qword_t *event) | |
1739 | { | |
1740 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; | |
1741 | unsigned int n_descs, n_packets, i; | |
1742 | struct efx_nic *efx = channel->efx; | |
1743 | struct efx_rx_queue *rx_queue; | |
1744 | bool rx_cont; | |
1745 | u16 flags = 0; | |
1746 | ||
1747 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1748 | return 0; | |
1749 | ||
1750 | /* Basic packet information */ | |
1751 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); | |
1752 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); | |
1753 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); | |
1754 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); | |
1755 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); | |
1756 | ||
1757 | WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)); | |
1758 | ||
1759 | rx_queue = efx_channel_get_rx_queue(channel); | |
1760 | ||
1761 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) | |
1762 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); | |
1763 | ||
1764 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & | |
1765 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1766 | ||
1767 | if (n_descs != rx_queue->scatter_n + 1) { | |
1768 | /* detect rx abort */ | |
1769 | if (unlikely(n_descs == rx_queue->scatter_n)) { | |
1770 | WARN_ON(rx_bytes != 0); | |
1771 | efx_ef10_handle_rx_abort(rx_queue); | |
1772 | return 0; | |
1773 | } | |
1774 | ||
1775 | if (unlikely(rx_queue->scatter_n != 0)) { | |
1776 | /* Scattered packet completions cannot be | |
1777 | * merged, so something has gone wrong. | |
1778 | */ | |
1779 | efx_ef10_handle_rx_bad_lbits( | |
1780 | rx_queue, next_ptr_lbits, | |
1781 | (rx_queue->removed_count + | |
1782 | rx_queue->scatter_n + 1) & | |
1783 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); | |
1784 | return 0; | |
1785 | } | |
1786 | ||
1787 | /* Merged completion for multiple non-scattered packets */ | |
1788 | rx_queue->scatter_n = 1; | |
1789 | rx_queue->scatter_len = 0; | |
1790 | n_packets = n_descs; | |
1791 | ++channel->n_rx_merge_events; | |
1792 | channel->n_rx_merge_packets += n_packets; | |
1793 | flags |= EFX_RX_PKT_PREFIX_LEN; | |
1794 | } else { | |
1795 | ++rx_queue->scatter_n; | |
1796 | rx_queue->scatter_len += rx_bytes; | |
1797 | if (rx_cont) | |
1798 | return 0; | |
1799 | n_packets = 1; | |
1800 | } | |
1801 | ||
1802 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) | |
1803 | flags |= EFX_RX_PKT_DISCARD; | |
1804 | ||
1805 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { | |
1806 | channel->n_rx_ip_hdr_chksum_err += n_packets; | |
1807 | } else if (unlikely(EFX_QWORD_FIELD(*event, | |
1808 | ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { | |
1809 | channel->n_rx_tcp_udp_chksum_err += n_packets; | |
1810 | } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || | |
1811 | rx_l4_class == ESE_DZ_L4_CLASS_UDP) { | |
1812 | flags |= EFX_RX_PKT_CSUMMED; | |
1813 | } | |
1814 | ||
1815 | if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) | |
1816 | flags |= EFX_RX_PKT_TCP; | |
1817 | ||
1818 | channel->irq_mod_score += 2 * n_packets; | |
1819 | ||
1820 | /* Handle received packet(s) */ | |
1821 | for (i = 0; i < n_packets; i++) { | |
1822 | efx_rx_packet(rx_queue, | |
1823 | rx_queue->removed_count & rx_queue->ptr_mask, | |
1824 | rx_queue->scatter_n, rx_queue->scatter_len, | |
1825 | flags); | |
1826 | rx_queue->removed_count += rx_queue->scatter_n; | |
1827 | } | |
1828 | ||
1829 | rx_queue->scatter_n = 0; | |
1830 | rx_queue->scatter_len = 0; | |
1831 | ||
1832 | return n_packets; | |
1833 | } | |
1834 | ||
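/* Handle a TX completion event: the event reports the index of the most
 * recently completed descriptor, from which the number of descriptors
 * completed since read_count is derived and passed to efx_xmit_done().
 */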
1835 | static int | |
1836 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |
1837 | { | |
1838 | struct efx_nic *efx = channel->efx; | |
1839 | struct efx_tx_queue *tx_queue; | |
1840 | unsigned int tx_ev_desc_ptr; | |
1841 | unsigned int tx_ev_q_label; | |
1842 | int tx_descs = 0; | |
1843 | ||
1844 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | |
1845 | return 0; | |
1846 | ||
1847 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | |
1848 | return 0; | |
1849 | ||
1850 | /* Transmit completion */ | |
1851 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); | |
1852 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); | |
1853 | tx_queue = efx_channel_get_tx_queue(channel, | |
1854 | tx_ev_q_label % EFX_TXQ_TYPES); | |
1855 | tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & | |
1856 | tx_queue->ptr_mask); | |
1857 | efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); | |
1858 | ||
1859 | return tx_descs; | |
1860 | } | |
1861 | ||
1862 | static void | |
1863 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |
1864 | { | |
1865 | struct efx_nic *efx = channel->efx; | |
1866 | int subcode; | |
1867 | ||
1868 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); | |
1869 | ||
1870 | switch (subcode) { | |
1871 | case ESE_DZ_DRV_TIMER_EV: | |
1872 | case ESE_DZ_DRV_WAKE_UP_EV: | |
1873 | break; | |
1874 | case ESE_DZ_DRV_START_UP_EV: | |
1875 | /* event queue init complete. ok. */ | |
1876 | break; | |
1877 | default: | |
1878 | netif_err(efx, hw, efx->net_dev, | |
1879 | "channel %d unknown driver event type %d" | |
1880 | " (data " EFX_QWORD_FMT ")\n", | |
1881 | channel->channel, subcode, | |
1882 | EFX_QWORD_VAL(*event)); | |
1883 | ||
1884 | } | |
1885 | } | |
1886 | ||
1887 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, | |
1888 | efx_qword_t *event) | |
1889 | { | |
1890 | struct efx_nic *efx = channel->efx; | |
1891 | u32 subcode; | |
1892 | ||
1893 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); | |
1894 | ||
1895 | switch (subcode) { | |
1896 | case EFX_EF10_TEST: | |
1897 | channel->event_test_cpu = raw_smp_processor_id(); | |
1898 | break; | |
1899 | case EFX_EF10_REFILL: | |
1900 | /* The queue must be empty, so we won't receive any rx | |
1901 | * events and efx_process_channel() won't refill the | |
1902 | * queue. Refill it here. | |
1903 | */ | |
1904 | efx_fast_push_rx_descriptors(&channel->rx_queue); | |
1905 | break; | |
1906 | default: | |
1907 | netif_err(efx, hw, efx->net_dev, | |
1908 | "channel %d unknown driver event type %u" | |
1909 | " (data " EFX_QWORD_FMT ")\n", | |
1910 | channel->channel, (unsigned) subcode, | |
1911 | EFX_QWORD_VAL(*event)); | |
1912 | } | |
1913 | } | |
1914 | ||
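/* Event processing loop: consume events until the queue is empty or the
 * NAPI quota is spent.  Each consumed entry is overwritten with all-ones
 * (EFX_SET_QWORD) so it reads as not-present on the next pass.
 */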
1915 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) | |
1916 | { | |
1917 | struct efx_nic *efx = channel->efx; | |
1918 | efx_qword_t event, *p_event; | |
1919 | unsigned int read_ptr; | |
1920 | int ev_code; | |
1921 | int tx_descs = 0; | |
1922 | int spent = 0; | |
1923 | ||
1924 | read_ptr = channel->eventq_read_ptr; | |
1925 | ||
1926 | for (;;) { | |
1927 | p_event = efx_event(channel, read_ptr); | |
1928 | event = *p_event; | |
1929 | ||
1930 | if (!efx_event_present(&event)) | |
1931 | break; | |
1932 | ||
1933 | EFX_SET_QWORD(*p_event); | |
1934 | ||
1935 | ++read_ptr; | |
1936 | ||
1937 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); | |
1938 | ||
1939 | netif_vdbg(efx, drv, efx->net_dev, | |
1940 | "processing event on %d " EFX_QWORD_FMT "\n", | |
1941 | channel->channel, EFX_QWORD_VAL(event)); | |
1942 | ||
1943 | switch (ev_code) { | |
1944 | case ESE_DZ_EV_CODE_MCDI_EV: | |
1945 | efx_mcdi_process_event(channel, &event); | |
1946 | break; | |
1947 | case ESE_DZ_EV_CODE_RX_EV: | |
1948 | spent += efx_ef10_handle_rx_event(channel, &event); | |
1949 | if (spent >= quota) { | |
1950 | /* XXX can we split a merged event to | |
1951 | * avoid going over-quota? | |
1952 | */ | |
1953 | spent = quota; | |
1954 | goto out; | |
1955 | } | |
1956 | break; | |
1957 | case ESE_DZ_EV_CODE_TX_EV: | |
1958 | tx_descs += efx_ef10_handle_tx_event(channel, &event); | |
1959 | if (tx_descs > efx->txq_entries) { | |
1960 | spent = quota; | |
1961 | goto out; | |
1962 | } else if (++spent == quota) { | |
1963 | goto out; | |
1964 | } | |
1965 | break; | |
1966 | case ESE_DZ_EV_CODE_DRIVER_EV: | |
1967 | efx_ef10_handle_driver_event(channel, &event); | |
1968 | if (++spent == quota) | |
1969 | goto out; | |
1970 | break; | |
1971 | case EFX_EF10_DRVGEN_EV: | |
1972 | efx_ef10_handle_driver_generated_event(channel, &event); | |
1973 | break; | |
1974 | default: | |
1975 | netif_err(efx, hw, efx->net_dev, | |
1976 | "channel %d unknown event type %d" | |
1977 | " (data " EFX_QWORD_FMT ")\n", | |
1978 | channel->channel, ev_code, | |
1979 | EFX_QWORD_VAL(event)); | |
1980 | } | |
1981 | } | |
1982 | ||
1983 | out: | |
1984 | channel->eventq_read_ptr = read_ptr; | |
1985 | return spent; | |
1986 | } | |
1987 | ||
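/* Acknowledge processed events by writing the event queue read pointer
 * back to the NIC.  With workaround 35388 the pointer is written as two
 * halves (high then low) through the indirect EVQ doorbell register.
 */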
1988 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) | |
1989 | { | |
1990 | struct efx_nic *efx = channel->efx; | |
1991 | efx_dword_t rptr; | |
1992 | ||
1993 | if (EFX_EF10_WORKAROUND_35388(efx)) { | |
1994 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < | |
1995 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
1996 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > | |
1997 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); | |
1998 | ||
1999 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
2000 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, | |
2001 | ERF_DD_EVQ_IND_RPTR, | |
2002 | (channel->eventq_read_ptr & | |
2003 | channel->eventq_mask) >> | |
2004 | ERF_DD_EVQ_IND_RPTR_WIDTH); | |
2005 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
2006 | channel->channel); | |
2007 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, | |
2008 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, | |
2009 | ERF_DD_EVQ_IND_RPTR, | |
2010 | channel->eventq_read_ptr & | |
2011 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); | |
2012 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, | |
2013 | channel->channel); | |
2014 | } else { | |
2015 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, | |
2016 | channel->eventq_read_ptr & | |
2017 | channel->eventq_mask); | |
2018 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); | |
2019 | } | |
2020 | } | |
2021 | ||
2022 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) | |
2023 | { | |
2024 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); | |
2025 | struct efx_nic *efx = channel->efx; | |
2026 | efx_qword_t event; | |
2027 | int rc; | |
2028 | ||
2029 | EFX_POPULATE_QWORD_2(event, | |
2030 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, | |
2031 | ESF_DZ_EV_DATA, EFX_EF10_TEST); | |
2032 | ||
2033 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); | |
2034 | ||
2035 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has | |
2036 | * already swapped the data to little-endian order. | |
2037 | */ | |
2038 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], | |
2039 | sizeof(efx_qword_t)); | |
2040 | ||
2041 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), | |
2042 | NULL, 0, NULL); | |
2043 | if (rc != 0) | |
2044 | goto fail; | |
2045 | ||
2046 | return; | |
2047 | ||
2048 | fail: | |
2049 | WARN_ON(true); | |
2050 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | |
2051 | } | |
2052 | ||
2053 | void efx_ef10_handle_drain_event(struct efx_nic *efx) | |
2054 | { | |
2055 | if (atomic_dec_and_test(&efx->active_queues)) | |
2056 | wake_up(&efx->flush_wq); | |
2057 | ||
2058 | WARN_ON(atomic_read(&efx->active_queues) < 0); | |
2059 | } | |
2060 | ||
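/* Tear down all TX and RX queues and wait up to EFX_MAX_FLUSH_TIME for
 * efx->active_queues to reach zero; it is decremented as each queue
 * reports draining complete (see efx_ef10_handle_drain_event()).
 */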
2061 | static int efx_ef10_fini_dmaq(struct efx_nic *efx) | |
2062 | { | |
2063 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2064 | struct efx_channel *channel; | |
2065 | struct efx_tx_queue *tx_queue; | |
2066 | struct efx_rx_queue *rx_queue; | |
2067 | int pending; | |
2068 | ||
2069 | /* If the MC has just rebooted, the TX/RX queues will have already been | |
2070 | * torn down, but efx->active_queues needs to be set to zero. | |
2071 | */ | |
2072 | if (nic_data->must_realloc_vis) { | |
2073 | atomic_set(&efx->active_queues, 0); | |
2074 | return 0; | |
2075 | } | |
2076 | ||
2077 | /* Do not attempt to write to the NIC during EEH recovery */ | |
2078 | if (efx->state != STATE_RECOVERY) { | |
2079 | efx_for_each_channel(channel, efx) { | |
2080 | efx_for_each_channel_rx_queue(rx_queue, channel) | |
2081 | efx_ef10_rx_fini(rx_queue); | |
2082 | efx_for_each_channel_tx_queue(tx_queue, channel) | |
2083 | efx_ef10_tx_fini(tx_queue); | |
2084 | } | |
2085 | ||
2086 | wait_event_timeout(efx->flush_wq, | |
2087 | atomic_read(&efx->active_queues) == 0, | |
2088 | msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); | |
2089 | pending = atomic_read(&efx->active_queues); | |
2090 | if (pending) { | |
2091 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", | |
2092 | pending); | |
2093 | return -ETIMEDOUT; | |
2094 | } | |
2095 | } | |
2096 | ||
2097 | return 0; | |
2098 | } | |
2099 | ||
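/* The software filter table is hashed on the match fields of the spec,
 * which are laid out contiguously at the end of struct efx_filter_spec
 * starting at outer_vid; both efx_ef10_filter_equal() and
 * efx_ef10_filter_hash() operate on that trailing region only.
 */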
2100 | static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, | |
2101 | const struct efx_filter_spec *right) | |
2102 | { | |
2103 | if ((left->match_flags ^ right->match_flags) | | |
2104 | ((left->flags ^ right->flags) & | |
2105 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) | |
2106 | return false; | |
2107 | ||
2108 | return memcmp(&left->outer_vid, &right->outer_vid, | |
2109 | sizeof(struct efx_filter_spec) - | |
2110 | offsetof(struct efx_filter_spec, outer_vid)) == 0; | |
2111 | } | |
2112 | ||
2113 | static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) | |
2114 | { | |
2115 | BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); | |
2116 | return jhash2((const u32 *)&spec->outer_vid, | |
2117 | (sizeof(struct efx_filter_spec) - | |
2118 | offsetof(struct efx_filter_spec, outer_vid)) / 4, | |
2119 | 0); | |
2120 | /* XXX should we randomise the initval? */ | |
2121 | } | |
2122 | ||
2123 | /* Decide whether a filter should be exclusive or else should allow | |
2124 | * delivery to additional recipients. Currently we decide that | |
2125 | * filters for specific local unicast MAC and IP addresses are | |
2126 | * exclusive. | |
2127 | */ | |
2128 | static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) | |
2129 | { | |
2130 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && | |
2131 | !is_multicast_ether_addr(spec->loc_mac)) | |
2132 | return true; | |
2133 | ||
2134 | if ((spec->match_flags & | |
2135 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == | |
2136 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { | |
2137 | if (spec->ether_type == htons(ETH_P_IP) && | |
2138 | !ipv4_is_multicast(spec->loc_host[0])) | |
2139 | return true; | |
2140 | if (spec->ether_type == htons(ETH_P_IPV6) && | |
2141 | ((const u8 *)spec->loc_host)[0] != 0xff) | |
2142 | return true; | |
2143 | } | |
2144 | ||
2145 | return false; | |
2146 | } | |
2147 | ||
2148 | static struct efx_filter_spec * | |
2149 | efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, | |
2150 | unsigned int filter_idx) | |
2151 | { | |
2152 | return (struct efx_filter_spec *)(table->entry[filter_idx].spec & | |
2153 | ~EFX_EF10_FILTER_FLAGS); | |
2154 | } | |
2155 | ||
2156 | static unsigned int | |
2157 | efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, | |
2158 | unsigned int filter_idx) | |
2159 | { | |
2160 | return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; | |
2161 | } | |
2162 | ||
2163 | static void | |
2164 | efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, | |
2165 | unsigned int filter_idx, | |
2166 | const struct efx_filter_spec *spec, | |
2167 | unsigned int flags) | |
2168 | { | |
2169 | table->entry[filter_idx].spec = (unsigned long)spec | flags; | |
2170 | } | |
2171 | ||
2172 | static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |
2173 | const struct efx_filter_spec *spec, | |
2174 | efx_dword_t *inbuf, u64 handle, | |
2175 | bool replacing) | |
2176 | { | |
2177 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2178 | ||
2179 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); | |
2180 | ||
2181 | if (replacing) { | |
2182 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2183 | MC_CMD_FILTER_OP_IN_OP_REPLACE); | |
2184 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); | |
2185 | } else { | |
2186 | u32 match_fields = 0; | |
2187 | ||
2188 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2189 | efx_ef10_filter_is_exclusive(spec) ? | |
2190 | MC_CMD_FILTER_OP_IN_OP_INSERT : | |
2191 | MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); | |
2192 | ||
2193 | /* Convert match flags and values. Unlike almost | |
2194 | * everything else in MCDI, these fields are in | |
2195 | * network byte order. | |
2196 | */ | |
2197 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) | |
2198 | match_fields |= | |
2199 | is_multicast_ether_addr(spec->loc_mac) ? | |
2200 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : | |
2201 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; | |
2202 | #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ | |
2203 | if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ | |
2204 | match_fields |= \ | |
2205 | 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
2206 | mcdi_field ## _LBN; \ | |
2207 | BUILD_BUG_ON( \ | |
2208 | MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ | |
2209 | sizeof(spec->gen_field)); \ | |
2210 | memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ | |
2211 | &spec->gen_field, sizeof(spec->gen_field)); \ | |
2212 | } | |
2213 | COPY_FIELD(REM_HOST, rem_host, SRC_IP); | |
2214 | COPY_FIELD(LOC_HOST, loc_host, DST_IP); | |
2215 | COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); | |
2216 | COPY_FIELD(REM_PORT, rem_port, SRC_PORT); | |
2217 | COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); | |
2218 | COPY_FIELD(LOC_PORT, loc_port, DST_PORT); | |
2219 | COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); | |
2220 | COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); | |
2221 | COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); | |
2222 | COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); | |
2223 | #undef COPY_FIELD | |
2224 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, | |
2225 | match_fields); | |
2226 | } | |
2227 | ||
2228 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | |
2229 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, | |
2230 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? | |
2231 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : | |
2232 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); | |
2233 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, | |
2234 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); | |
2235 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id); | |
2236 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, | |
2237 | (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? | |
2238 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : | |
2239 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); | |
2240 | if (spec->flags & EFX_FILTER_FLAG_RX_RSS) | |
2241 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, | |
2242 | spec->rss_context != | |
2243 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? | |
2244 | spec->rss_context : nic_data->rx_rss_context); | |
2245 | } | |
2246 | ||
2247 | static int efx_ef10_filter_push(struct efx_nic *efx, | |
2248 | const struct efx_filter_spec *spec, | |
2249 | u64 *handle, bool replacing) | |
2250 | { | |
2251 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2252 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); | |
2253 | int rc; | |
2254 | ||
2255 | efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); | |
2256 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2257 | outbuf, sizeof(outbuf), NULL); | |
2258 | if (rc == 0) | |
2259 | *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
2260 | return rc; | |
2261 | } | |
2262 | ||
2263 | static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, | |
2264 | enum efx_filter_match_flags match_flags) | |
2265 | { | |
2266 | unsigned int match_pri; | |
2267 | ||
2268 | for (match_pri = 0; | |
2269 | match_pri < table->rx_match_count; | |
2270 | match_pri++) | |
2271 | if (table->rx_match_flags[match_pri] == match_flags) | |
2272 | return match_pri; | |
2273 | ||
2274 | return -EPROTONOSUPPORT; | |
2275 | } | |
2276 | ||
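/* Insert or replace a filter.  The software table is probed linearly from
 * the hash bucket for up to EFX_EF10_FILTER_SEARCH_LIMIT entries; if a
 * matching entry is busy (a firmware operation is in flight) we sleep on
 * table->waitq and retry the whole search.
 */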
2277 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, | |
2278 | struct efx_filter_spec *spec, | |
2279 | bool replace_equal) | |
2280 | { | |
2281 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2282 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
2283 | struct efx_filter_spec *saved_spec; | |
2284 | unsigned int match_pri, hash; | |
2285 | unsigned int priv_flags; | |
2286 | bool replacing = false; | |
2287 | int ins_index = -1; | |
2288 | DEFINE_WAIT(wait); | |
2289 | bool is_mc_recip; | |
2290 | s32 rc; | |
2291 | ||
2292 | /* For now, only support RX filters */ | |
2293 | if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != | |
2294 | EFX_FILTER_FLAG_RX) | |
2295 | return -EINVAL; | |
2296 | ||
2297 | rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); | |
2298 | if (rc < 0) | |
2299 | return rc; | |
2300 | match_pri = rc; | |
2301 | ||
2302 | hash = efx_ef10_filter_hash(spec); | |
2303 | is_mc_recip = efx_filter_is_mc_recipient(spec); | |
2304 | if (is_mc_recip) | |
2305 | bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | |
2306 | ||
2307 | /* Find any existing filters with the same match tuple or | |
2308 | * else a free slot to insert at. If any of them are busy, | |
2309 | * we have to wait and retry. | |
2310 | */ | |
2311 | for (;;) { | |
2312 | unsigned int depth = 1; | |
2313 | unsigned int i; | |
2314 | ||
2315 | spin_lock_bh(&efx->filter_lock); | |
2316 | ||
2317 | for (;;) { | |
2318 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2319 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2320 | ||
2321 | if (!saved_spec) { | |
2322 | if (ins_index < 0) | |
2323 | ins_index = i; | |
2324 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
2325 | if (table->entry[i].spec & | |
2326 | EFX_EF10_FILTER_FLAG_BUSY) | |
2327 | break; | |
2328 | if (spec->priority < saved_spec->priority && | |
2329 | !(saved_spec->priority == | |
2330 | EFX_FILTER_PRI_REQUIRED && | |
2331 | saved_spec->flags & | |
2332 | EFX_FILTER_FLAG_RX_STACK)) { | |
2333 | rc = -EPERM; | |
2334 | goto out_unlock; | |
2335 | } | |
2336 | if (!is_mc_recip) { | |
2337 | /* This is the only one */ | |
2338 | if (spec->priority == | |
2339 | saved_spec->priority && | |
2340 | !replace_equal) { | |
2341 | rc = -EEXIST; | |
2342 | goto out_unlock; | |
2343 | } | |
2344 | ins_index = i; | |
2345 | goto found; | |
2346 | } else if (spec->priority > | |
2347 | saved_spec->priority || | |
2348 | (spec->priority == | |
2349 | saved_spec->priority && | |
2350 | replace_equal)) { | |
2351 | if (ins_index < 0) | |
2352 | ins_index = i; | |
2353 | else | |
2354 | __set_bit(depth, mc_rem_map); | |
2355 | } | |
2356 | } | |
2357 | ||
2358 | /* Once we reach the maximum search depth, use | |
2359 | * the first suitable slot or return -EBUSY if | |
2360 | * there was none | |
2361 | */ | |
2362 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
2363 | if (ins_index < 0) { | |
2364 | rc = -EBUSY; | |
2365 | goto out_unlock; | |
2366 | } | |
2367 | goto found; | |
2368 | } | |
2369 | ||
2370 | ++depth; | |
2371 | } | |
2372 | ||
2373 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2374 | spin_unlock_bh(&efx->filter_lock); | |
2375 | schedule(); | |
2376 | } | |
2377 | ||
2378 | found: | |
2379 | /* Create a software table entry if necessary, and mark it | |
2380 | * busy. We might yet fail to insert, but any attempt to | |
2381 | * insert a conflicting filter while we're waiting for the | |
2382 | * firmware must find the busy entry. | |
2383 | */ | |
2384 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2385 | if (saved_spec) { | |
2386 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { | |
2387 | /* Just make sure it won't be removed */ | |
2388 | saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; | |
2389 | table->entry[ins_index].spec &= | |
2390 | ~EFX_EF10_FILTER_FLAG_STACK_OLD; | |
2391 | rc = ins_index; | |
2392 | goto out_unlock; | |
2393 | } | |
2394 | replacing = true; | |
2395 | priv_flags = efx_ef10_filter_entry_flags(table, ins_index); | |
2396 | } else { | |
2397 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2398 | if (!saved_spec) { | |
2399 | rc = -ENOMEM; | |
2400 | goto out_unlock; | |
2401 | } | |
2402 | *saved_spec = *spec; | |
2403 | priv_flags = 0; | |
2404 | } | |
2405 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2406 | priv_flags | EFX_EF10_FILTER_FLAG_BUSY); | |
2407 | ||
2408 | /* Mark lower-priority multicast recipients busy prior to removal */ | |
2409 | if (is_mc_recip) { | |
2410 | unsigned int depth, i; | |
2411 | ||
2412 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2413 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2414 | if (test_bit(depth, mc_rem_map)) | |
2415 | table->entry[i].spec |= | |
2416 | EFX_EF10_FILTER_FLAG_BUSY; | |
2417 | } | |
2418 | } | |
2419 | ||
2420 | spin_unlock_bh(&efx->filter_lock); | |
2421 | ||
2422 | rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, | |
2423 | replacing); | |
2424 | ||
2425 | /* Finalise the software table entry */ | |
2426 | spin_lock_bh(&efx->filter_lock); | |
2427 | if (rc == 0) { | |
2428 | if (replacing) { | |
2429 | /* Update the fields that may differ */ | |
2430 | saved_spec->priority = spec->priority; | |
2431 | saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK; | |
2432 | saved_spec->flags |= spec->flags; | |
2433 | saved_spec->rss_context = spec->rss_context; | |
2434 | saved_spec->dmaq_id = spec->dmaq_id; | |
2435 | } | |
2436 | } else if (!replacing) { | |
2437 | kfree(saved_spec); | |
2438 | saved_spec = NULL; | |
2439 | } | |
2440 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); | |
2441 | ||
2442 | /* Remove and finalise entries for lower-priority multicast | |
2443 | * recipients | |
2444 | */ | |
2445 | if (is_mc_recip) { | |
2446 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2447 | unsigned int depth, i; | |
2448 | ||
2449 | memset(inbuf, 0, sizeof(inbuf)); | |
2450 | ||
2451 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { | |
2452 | if (!test_bit(depth, mc_rem_map)) | |
2453 | continue; | |
2454 | ||
2455 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2456 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2457 | priv_flags = efx_ef10_filter_entry_flags(table, i); | |
2458 | ||
2459 | if (rc == 0) { | |
2460 | spin_unlock_bh(&efx->filter_lock); | |
2461 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2462 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2463 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2464 | table->entry[i].handle); | |
2465 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2466 | inbuf, sizeof(inbuf), | |
2467 | NULL, 0, NULL); | |
2468 | spin_lock_bh(&efx->filter_lock); | |
2469 | } | |
2470 | ||
2471 | if (rc == 0) { | |
2472 | kfree(saved_spec); | |
2473 | saved_spec = NULL; | |
2474 | priv_flags = 0; | |
2475 | } else { | |
2476 | priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2477 | } | |
2478 | efx_ef10_filter_set_entry(table, i, saved_spec, | |
2479 | priv_flags); | |
2480 | } | |
2481 | } | |
2482 | ||
2483 | /* If successful, return the inserted filter ID (match_pri * HUNT_FILTER_TBL_ROWS + ins_index) */ | |
2484 | if (rc == 0) | |
2485 | rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; | |
2486 | ||
2487 | wake_up_all(&table->waitq); | |
2488 | out_unlock: | |
2489 | spin_unlock_bh(&efx->filter_lock); | |
2490 | finish_wait(&table->waitq, &wait); | |
2491 | return rc; | |
2492 | } | |
2493 | ||
9fd8095d | 2494 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) | |
8127d661 BH |
2495 | { | |
2496 | /* no need to do anything here on EF10 */ | |
2497 | } | |
2498 | ||
2499 | /* Remove a filter. | |
2500 | * If !stack_requested, remove by ID | |
2501 | * If stack_requested, remove by index | |
2502 | * Filter ID may come from userland and must be range-checked. | |
2503 | */ | |
2504 | static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |
2505 | enum efx_filter_priority priority, | |
2506 | u32 filter_id, bool stack_requested) | |
2507 | { | |
2508 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2509 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2510 | MCDI_DECLARE_BUF(inbuf, | |
2511 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2512 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2513 | struct efx_filter_spec *spec; | |
2514 | DEFINE_WAIT(wait); | |
2515 | int rc; | |
2516 | ||
2517 | /* Find the software table entry and mark it busy. Don't | |
2518 | * remove it yet; any attempt to update while we're waiting | |
2519 | * for the firmware must find the busy entry. | |
2520 | */ | |
2521 | for (;;) { | |
2522 | spin_lock_bh(&efx->filter_lock); | |
2523 | if (!(table->entry[filter_idx].spec & | |
2524 | EFX_EF10_FILTER_FLAG_BUSY)) | |
2525 | break; | |
2526 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); | |
2527 | spin_unlock_bh(&efx->filter_lock); | |
2528 | schedule(); | |
2529 | } | |
2530 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2531 | if (!spec || spec->priority > priority || | |
2532 | (!stack_requested && | |
2533 | efx_ef10_filter_rx_match_pri(table, spec->match_flags) != | |
2534 | filter_id / HUNT_FILTER_TBL_ROWS)) { | |
2535 | rc = -ENOENT; | |
2536 | goto out_unlock; | |
2537 | } | |
2538 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2539 | spin_unlock_bh(&efx->filter_lock); | |
2540 | ||
2541 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) { | |
2542 | /* Reset steering of a stack-owned filter */ | |
2543 | ||
2544 | struct efx_filter_spec new_spec = *spec; | |
2545 | ||
2546 | new_spec.priority = EFX_FILTER_PRI_REQUIRED; | |
2547 | new_spec.flags = (EFX_FILTER_FLAG_RX | | |
2548 | EFX_FILTER_FLAG_RX_RSS | | |
2549 | EFX_FILTER_FLAG_RX_STACK); | |
2550 | new_spec.dmaq_id = 0; | |
2551 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; | |
2552 | rc = efx_ef10_filter_push(efx, &new_spec, | |
2553 | &table->entry[filter_idx].handle, | |
2554 | true); | |
2555 | ||
2556 | spin_lock_bh(&efx->filter_lock); | |
2557 | if (rc == 0) | |
2558 | *spec = new_spec; | |
2559 | } else { | |
2560 | /* Really remove the filter */ | |
2561 | ||
2562 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2563 | efx_ef10_filter_is_exclusive(spec) ? | |
2564 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
2565 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
2566 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2567 | table->entry[filter_idx].handle); | |
2568 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, | |
2569 | inbuf, sizeof(inbuf), NULL, 0, NULL); | |
2570 | ||
2571 | spin_lock_bh(&efx->filter_lock); | |
2572 | if (rc == 0) { | |
2573 | kfree(spec); | |
2574 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2575 | } | |
2576 | } | |
2577 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2578 | wake_up_all(&table->waitq); | |
2579 | out_unlock: | |
2580 | spin_unlock_bh(&efx->filter_lock); | |
2581 | finish_wait(&table->waitq, &wait); | |
2582 | return rc; | |
2583 | } | |
2584 | ||
2585 | static int efx_ef10_filter_remove_safe(struct efx_nic *efx, | |
2586 | enum efx_filter_priority priority, | |
2587 | u32 filter_id) | |
2588 | { | |
2589 | return efx_ef10_filter_remove_internal(efx, priority, filter_id, false); | |
2590 | } | |
2591 | ||
2592 | static int efx_ef10_filter_get_safe(struct efx_nic *efx, | |
2593 | enum efx_filter_priority priority, | |
2594 | u32 filter_id, struct efx_filter_spec *spec) | |
2595 | { | |
2596 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; | |
2597 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2598 | const struct efx_filter_spec *saved_spec; | |
2599 | int rc; | |
2600 | ||
2601 | spin_lock_bh(&efx->filter_lock); | |
2602 | saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2603 | if (saved_spec && saved_spec->priority == priority && | |
2604 | efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == | |
2605 | filter_id / HUNT_FILTER_TBL_ROWS) { | |
2606 | *spec = *saved_spec; | |
2607 | rc = 0; | |
2608 | } else { | |
2609 | rc = -ENOENT; | |
2610 | } | |
2611 | spin_unlock_bh(&efx->filter_lock); | |
2612 | return rc; | |
2613 | } | |
2614 | ||
2615 | static void efx_ef10_filter_clear_rx(struct efx_nic *efx, | |
2616 | enum efx_filter_priority priority) | |
2617 | { | |
2618 | /* TODO */ | |
2619 | } | |
2620 | ||
2621 | static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, | |
2622 | enum efx_filter_priority priority) | |
2623 | { | |
2624 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2625 | unsigned int filter_idx; | |
2626 | s32 count = 0; | |
2627 | ||
2628 | spin_lock_bh(&efx->filter_lock); | |
2629 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2630 | if (table->entry[filter_idx].spec && | |
2631 | efx_ef10_filter_entry_spec(table, filter_idx)->priority == | |
2632 | priority) | |
2633 | ++count; | |
2634 | } | |
2635 | spin_unlock_bh(&efx->filter_lock); | |
2636 | return count; | |
2637 | } | |
2638 | ||
2639 | static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) | |
2640 | { | |
2641 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2642 | ||
2643 | return table->rx_match_count * HUNT_FILTER_TBL_ROWS; | |
2644 | } | |
2645 | ||
2646 | static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, | |
2647 | enum efx_filter_priority priority, | |
2648 | u32 *buf, u32 size) | |
2649 | { | |
2650 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2651 | struct efx_filter_spec *spec; | |
2652 | unsigned int filter_idx; | |
2653 | s32 count = 0; | |
2654 | ||
2655 | spin_lock_bh(&efx->filter_lock); | |
2656 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2657 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2658 | if (spec && spec->priority == priority) { | |
2659 | if (count == size) { | |
2660 | count = -EMSGSIZE; | |
2661 | break; | |
2662 | } | |
2663 | buf[count++] = (efx_ef10_filter_rx_match_pri( | |
2664 | table, spec->match_flags) * | |
2665 | HUNT_FILTER_TBL_ROWS + | |
2666 | filter_idx); | |
2667 | } | |
2668 | } | |
2669 | spin_unlock_bh(&efx->filter_lock); | |
2670 | return count; | |
2671 | } | |
2672 | ||
2673 | #ifdef CONFIG_RFS_ACCEL | |
2674 | ||
2675 | static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; | |
2676 | ||
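/* Asynchronous filter insertion used by accelerated RFS.  Unlike
 * efx_ef10_filter_insert() this path never sleeps: a busy entry makes it
 * fail with -EBUSY, and the MCDI request is issued asynchronously with
 * (replacing, ins_index, dmaq_id) packed into the completion cookie.
 */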
2677 | static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, | |
2678 | struct efx_filter_spec *spec) | |
2679 | { | |
2680 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2681 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
2682 | struct efx_filter_spec *saved_spec; | |
2683 | unsigned int hash, i, depth = 1; | |
2684 | bool replacing = false; | |
2685 | int ins_index = -1; | |
2686 | u64 cookie; | |
2687 | s32 rc; | |
2688 | ||
2689 | /* Must be an RX filter without RSS and not for a multicast | |
2690 | * destination address (RFS only works for connected sockets). | |
2691 | * These restrictions allow us to pass only a tiny amount of | |
2692 | * data through to the completion function. | |
2693 | */ | |
2694 | EFX_WARN_ON_PARANOID(spec->flags != | |
2695 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); | |
2696 | EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); | |
2697 | EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); | |
2698 | ||
2699 | hash = efx_ef10_filter_hash(spec); | |
2700 | ||
2701 | spin_lock_bh(&efx->filter_lock); | |
2702 | ||
2703 | /* Find any existing filter with the same match tuple or else | |
2704 | * a free slot to insert at. If an existing filter is busy, | |
2705 | * we have to give up. | |
2706 | */ | |
2707 | for (;;) { | |
2708 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); | |
2709 | saved_spec = efx_ef10_filter_entry_spec(table, i); | |
2710 | ||
2711 | if (!saved_spec) { | |
2712 | if (ins_index < 0) | |
2713 | ins_index = i; | |
2714 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { | |
2715 | if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { | |
2716 | rc = -EBUSY; | |
2717 | goto fail_unlock; | |
2718 | } | |
2719 | EFX_WARN_ON_PARANOID(saved_spec->flags & | |
2720 | EFX_FILTER_FLAG_RX_STACK); | |
2721 | if (spec->priority < saved_spec->priority) { | |
2722 | rc = -EPERM; | |
2723 | goto fail_unlock; | |
2724 | } | |
2725 | ins_index = i; | |
2726 | break; | |
2727 | } | |
2728 | ||
2729 | /* Once we reach the maximum search depth, use the | |
2730 | * first suitable slot or return -EBUSY if there was | |
2731 | * none | |
2732 | */ | |
2733 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { | |
2734 | if (ins_index < 0) { | |
2735 | rc = -EBUSY; | |
2736 | goto fail_unlock; | |
2737 | } | |
2738 | break; | |
2739 | } | |
2740 | ||
2741 | ++depth; | |
2742 | } | |
2743 | ||
2744 | /* Create a software table entry if necessary, and mark it | |
2745 | * busy. We might yet fail to insert, but any attempt to | |
2746 | * insert a conflicting filter while we're waiting for the | |
2747 | * firmware must find the busy entry. | |
2748 | */ | |
2749 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2750 | if (saved_spec) { | |
2751 | replacing = true; | |
2752 | } else { | |
2753 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); | |
2754 | if (!saved_spec) { | |
2755 | rc = -ENOMEM; | |
2756 | goto fail_unlock; | |
2757 | } | |
2758 | *saved_spec = *spec; | |
2759 | } | |
2760 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, | |
2761 | EFX_EF10_FILTER_FLAG_BUSY); | |
2762 | ||
2763 | spin_unlock_bh(&efx->filter_lock); | |
2764 | ||
2765 | /* Pack up the variables needed on completion: bit 31 = replacing, bits 30:16 = ins_index, bits 15:0 = dmaq_id */ | |
2766 | cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; | |
2767 | ||
2768 | efx_ef10_filter_push_prep(efx, spec, inbuf, | |
2769 | table->entry[ins_index].handle, replacing); | |
2770 | efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
2771 | MC_CMD_FILTER_OP_OUT_LEN, | |
2772 | efx_ef10_filter_rfs_insert_complete, cookie); | |
2773 | ||
2774 | return ins_index; | |
2775 | ||
2776 | fail_unlock: | |
2777 | spin_unlock_bh(&efx->filter_lock); | |
2778 | return rc; | |
2779 | } | |
2780 | ||
2781 | static void | |
2782 | efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, | |
2783 | int rc, efx_dword_t *outbuf, | |
2784 | size_t outlen_actual) | |
2785 | { | |
2786 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2787 | unsigned int ins_index, dmaq_id; | |
2788 | struct efx_filter_spec *spec; | |
2789 | bool replacing; | |
2790 | ||
2791 | /* Unpack the cookie */ | |
2792 | replacing = cookie >> 31; | |
2793 | ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); | |
2794 | dmaq_id = cookie & 0xffff; | |
2795 | ||
2796 | spin_lock_bh(&efx->filter_lock); | |
2797 | spec = efx_ef10_filter_entry_spec(table, ins_index); | |
2798 | if (rc == 0) { | |
2799 | table->entry[ins_index].handle = | |
2800 | MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); | |
2801 | if (replacing) | |
2802 | spec->dmaq_id = dmaq_id; | |
2803 | } else if (!replacing) { | |
2804 | kfree(spec); | |
2805 | spec = NULL; | |
2806 | } | |
2807 | efx_ef10_filter_set_entry(table, ins_index, spec, 0); | |
2808 | spin_unlock_bh(&efx->filter_lock); | |
2809 | ||
2810 | wake_up_all(&table->waitq); | |
2811 | } | |
2812 | ||
2813 | static void | |
2814 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2815 | unsigned long filter_idx, | |
2816 | int rc, efx_dword_t *outbuf, | |
2817 | size_t outlen_actual); | |
2818 | ||
2819 | static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |
2820 | unsigned int filter_idx) | |
2821 | { | |
2822 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2823 | struct efx_filter_spec *spec = | |
2824 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2825 | MCDI_DECLARE_BUF(inbuf, | |
2826 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + | |
2827 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); | |
2828 | ||
2829 | if (!spec || | |
2830 | (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || | |
2831 | spec->priority != EFX_FILTER_PRI_HINT || | |
2832 | !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, | |
2833 | flow_id, filter_idx)) | |
2834 | return false; | |
2835 | ||
2836 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
2837 | MC_CMD_FILTER_OP_IN_OP_REMOVE); | |
2838 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
2839 | table->entry[filter_idx].handle); | |
2840 | if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, | |
2841 | efx_ef10_filter_rfs_expire_complete, filter_idx)) | |
2842 | return false; | |
2843 | ||
2844 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2845 | return true; | |
2846 | } | |
2847 | ||
2848 | static void | |
2849 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, | |
2850 | unsigned long filter_idx, | |
2851 | int rc, efx_dword_t *outbuf, | |
2852 | size_t outlen_actual) | |
2853 | { | |
2854 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2855 | struct efx_filter_spec *spec = | |
2856 | efx_ef10_filter_entry_spec(table, filter_idx); | |
2857 | ||
2858 | spin_lock_bh(&efx->filter_lock); | |
2859 | if (rc == 0) { | |
2860 | kfree(spec); | |
2861 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2862 | } | |
2863 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; | |
2864 | wake_up_all(&table->waitq); | |
2865 | spin_unlock_bh(&efx->filter_lock); | |
2866 | } | |
2867 | ||
2868 | #endif /* CONFIG_RFS_ACCEL */ | |
2869 | ||
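/* Translate an MCDI FILTER_OP match-field bitmask (as reported by
 * MC_CMD_GET_PARSER_DISP_INFO) back into the driver's EFX_FILTER_MATCH_*
 * flags.  Any bit that cannot be mapped makes the whole match type
 * unusable by the driver (-EINVAL).
 */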
2870 | static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) | |
2871 | { | |
2872 | int match_flags = 0; | |
2873 | ||
2874 | #define MAP_FLAG(gen_flag, mcdi_field) { \ | |
2875 | u32 old_mcdi_flags = mcdi_flags; \ | |
2876 | mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ | |
2877 | mcdi_field ## _LBN); \ | |
2878 | if (mcdi_flags != old_mcdi_flags) \ | |
2879 | match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ | |
2880 | } | |
2881 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); | |
2882 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); | |
2883 | MAP_FLAG(REM_HOST, SRC_IP); | |
2884 | MAP_FLAG(LOC_HOST, DST_IP); | |
2885 | MAP_FLAG(REM_MAC, SRC_MAC); | |
2886 | MAP_FLAG(REM_PORT, SRC_PORT); | |
2887 | MAP_FLAG(LOC_MAC, DST_MAC); | |
2888 | MAP_FLAG(LOC_PORT, DST_PORT); | |
2889 | MAP_FLAG(ETHER_TYPE, ETHER_TYPE); | |
2890 | MAP_FLAG(INNER_VID, INNER_VLAN); | |
2891 | MAP_FLAG(OUTER_VID, OUTER_VLAN); | |
2892 | MAP_FLAG(IP_PROTO, IP_PROTO); | |
2893 | #undef MAP_FLAG | |
2894 | ||
2895 | /* Did we map them all? */ | |
2896 | if (mcdi_flags) | |
2897 | return -EINVAL; | |
2898 | ||
2899 | return match_flags; | |
2900 | } | |
2901 | ||
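/* Allocate the software filter table and query the firmware for the RX
 * match types it supports, recording them in rx_match_flags[] in the
 * priority order returned by MC_CMD_GET_PARSER_DISP_INFO.
 */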
2902 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) | |
2903 | { | |
2904 | MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); | |
2905 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); | |
2906 | unsigned int pd_match_pri, pd_match_count; | |
2907 | struct efx_ef10_filter_table *table; | |
2908 | size_t outlen; | |
2909 | int rc; | |
2910 | ||
2911 | table = kzalloc(sizeof(*table), GFP_KERNEL); | |
2912 | if (!table) | |
2913 | return -ENOMEM; | |
2914 | ||
2915 | /* Find out which RX filter types are supported, and their priorities */ | |
2916 | MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, | |
2917 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); | |
2918 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, | |
2919 | inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), | |
2920 | &outlen); | |
2921 | if (rc) | |
2922 | goto fail; | |
2923 | pd_match_count = MCDI_VAR_ARRAY_LEN( | |
2924 | outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); | |
2925 | table->rx_match_count = 0; | |
2926 | ||
2927 | for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { | |
2928 | u32 mcdi_flags = | |
2929 | MCDI_ARRAY_DWORD( | |
2930 | outbuf, | |
2931 | GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, | |
2932 | pd_match_pri); | |
2933 | rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); | |
2934 | if (rc < 0) { | |
2935 | netif_dbg(efx, probe, efx->net_dev, | |
2936 | "%s: fw flags %#x pri %u not supported in driver\n", | |
2937 | __func__, mcdi_flags, pd_match_pri); | |
2938 | } else { | |
2939 | netif_dbg(efx, probe, efx->net_dev, | |
2940 | "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", | |
2941 | __func__, mcdi_flags, pd_match_pri, | |
2942 | rc, table->rx_match_count); | |
2943 | table->rx_match_flags[table->rx_match_count++] = rc; | |
2944 | } | |
2945 | } | |
2946 | ||
2947 | table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); | |
2948 | if (!table->entry) { | |
2949 | rc = -ENOMEM; | |
2950 | goto fail; | |
2951 | } | |
2952 | ||
2953 | efx->filter_state = table; | |
2954 | init_waitqueue_head(&table->waitq); | |
2955 | return 0; | |
2956 | ||
2957 | fail: | |
2958 | kfree(table); | |
2959 | return rc; | |
2960 | } | |
2961 | ||
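/* Re-push every filter in the software table to the firmware, used when
 * nic_data->must_restore_filters is set (typically after an MC reboot has
 * invalidated the hardware table).  Entries that cannot be re-inserted
 * are dropped from the software table.
 */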
2962 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) | |
2963 | { | |
2964 | struct efx_ef10_filter_table *table = efx->filter_state; | |
2965 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | |
2966 | struct efx_filter_spec *spec; | |
2967 | unsigned int filter_idx; | |
2968 | bool failed = false; | |
2969 | int rc; | |
2970 | ||
2971 | if (!nic_data->must_restore_filters) | |
2972 | return; | |
2973 | ||
2974 | spin_lock_bh(&efx->filter_lock); | |
2975 | ||
2976 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
2977 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
2978 | if (!spec) | |
2979 | continue; | |
2980 | ||
2981 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; | |
2982 | spin_unlock_bh(&efx->filter_lock); | |
2983 | ||
2984 | rc = efx_ef10_filter_push(efx, spec, | |
2985 | &table->entry[filter_idx].handle, | |
2986 | false); | |
2987 | if (rc) | |
2988 | failed = true; | |
2989 | ||
2990 | spin_lock_bh(&efx->filter_lock); | |
2991 | if (rc) { | |
2992 | kfree(spec); | |
2993 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); | |
2994 | } else { | |
2995 | table->entry[filter_idx].spec &= | |
2996 | ~EFX_EF10_FILTER_FLAG_BUSY; | |
2997 | } | |
2998 | } | |
2999 | ||
3000 | spin_unlock_bh(&efx->filter_lock); | |
3001 | ||
3002 | if (failed) | |
3003 | netif_err(efx, hw, efx->net_dev, | |
3004 | "unable to restore all filters\n"); | |
3005 | else | |
3006 | nic_data->must_restore_filters = false; | |
3007 | } | |
3008 | ||
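/* Remove every remaining filter from the firmware and free the software
 * table.  Removal failures are only warned about, since the table is
 * being torn down anyway.
 */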
3009 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) | |
3010 | { | |
3011 | struct efx_ef10_filter_table *table = efx->filter_state; | |
3012 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); | |
3013 | struct efx_filter_spec *spec; | |
3014 | unsigned int filter_idx; | |
3015 | int rc; | |
3016 | ||
3017 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | |
3018 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | |
3019 | if (!spec) | |
3020 | continue; | |
3021 | ||
3022 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | |
3023 | efx_ef10_filter_is_exclusive(spec) ? | |
3024 | MC_CMD_FILTER_OP_IN_OP_REMOVE : | |
3025 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); | |
3026 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, | |
3027 | table->entry[filter_idx].handle); | |
3028 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), | |
3029 | NULL, 0, NULL); | |
3030 | ||
3031 | WARN_ON(rc != 0); | |
3032 | kfree(spec); | |
3033 | } | |
3034 | ||
3035 | vfree(table->entry); | |
3036 | kfree(table); | |
3037 | } | |
3038 | ||
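| /* Synchronise the stack-owned RX filters with the net_device address | |
| * lists, mark-and-sweep style: mark the currently installed filters | |
| * as old, insert/renew filters for the current unicast and multicast | |
| * lists (falling back to a default-match filter if a list overflows | |
| * or an insertion fails), then remove whatever is still marked old. | |
| */ | |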
3039 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | |
3040 | { | |
3041 | struct efx_ef10_filter_table *table = efx->filter_state; | |
3042 | struct net_device *net_dev = efx->net_dev; | |
3043 | struct efx_filter_spec spec; | |
3044 | bool remove_failed = false; | |
3045 | struct netdev_hw_addr *uc; | |
3046 | struct netdev_hw_addr *mc; | |
3047 | unsigned int filter_idx; | |
3048 | int i, n, rc; | |
3049 | ||
3050 | if (!efx_dev_registered(efx)) | |
3051 | return; | |
3052 | ||
3053 | /* Mark old filters that may need to be removed */ | |
3054 | spin_lock_bh(&efx->filter_lock); | |
3055 | n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count; | |
3056 | for (i = 0; i < n; i++) { | |
3057 | filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
3058 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
3059 | } | |
3060 | n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count; | |
3061 | for (i = 0; i < n; i++) { | |
3062 | filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS; | |
3063 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; | |
3064 | } | |
3065 | spin_unlock_bh(&efx->filter_lock); | |
3066 | ||
3067 | /* Copy/convert the address lists; add the primary station | |
3068 | * address and broadcast address | |
3069 | */ | |
3070 | netif_addr_lock_bh(net_dev); | |
3071 | if (net_dev->flags & IFF_PROMISC || | |
3072 | netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) { | |
3073 | table->stack_uc_count = -1; | |
3074 | } else { | |
3075 | table->stack_uc_count = 1 + netdev_uc_count(net_dev); | |
3076 | memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr, | |
3077 | ETH_ALEN); | |
3078 | i = 1; | |
3079 | netdev_for_each_uc_addr(uc, net_dev) { | |
3080 | memcpy(table->stack_uc_list[i].addr, | |
3081 | uc->addr, ETH_ALEN); | |
3082 | i++; | |
3083 | } | |
3084 | } | |
3085 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || | |
3086 | netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) { | |
3087 | table->stack_mc_count = -1; | |
3088 | } else { | |
3089 | table->stack_mc_count = 1 + netdev_mc_count(net_dev); | |
3090 | eth_broadcast_addr(table->stack_mc_list[0].addr); | |
3091 | i = 1; | |
3092 | netdev_for_each_mc_addr(mc, net_dev) { | |
3093 | memcpy(table->stack_mc_list[i].addr, | |
3094 | mc->addr, ETH_ALEN); | |
3095 | i++; | |
3096 | } | |
3097 | } | |
3098 | netif_addr_unlock_bh(net_dev); | |
3099 | ||
3100 | /* Insert/renew unicast filters */ | |
3101 | if (table->stack_uc_count >= 0) { | |
3102 | for (i = 0; i < table->stack_uc_count; i++) { | |
3103 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3104 | EFX_FILTER_FLAG_RX_RSS | | |
3105 | EFX_FILTER_FLAG_RX_STACK, | |
3106 | 0); | |
3107 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
3108 | table->stack_uc_list[i].addr); | |
3109 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3110 | if (rc < 0) { | |
3111 | /* Fall back to unicast-promisc */ | |
3112 | while (i--) | |
3113 | efx_ef10_filter_remove_safe( | |
3114 | efx, EFX_FILTER_PRI_REQUIRED, | |
3115 | table->stack_uc_list[i].id); | |
3116 | table->stack_uc_count = -1; | |
3117 | break; | |
3118 | } | |
3119 | table->stack_uc_list[i].id = rc; | |
3120 | } | |
3121 | } | |
3122 | if (table->stack_uc_count < 0) { | |
3123 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3124 | EFX_FILTER_FLAG_RX_RSS | | |
3125 | EFX_FILTER_FLAG_RX_STACK, | |
3126 | 0); | |
3127 | efx_filter_set_uc_def(&spec); | |
3128 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3129 | if (rc < 0) { | |
3130 | WARN_ON(1); | |
3131 | table->stack_uc_count = 0; | |
3132 | } else { | |
3133 | table->stack_uc_list[0].id = rc; | |
3134 | } | |
3135 | } | |
3136 | ||
3137 | /* Insert/renew multicast filters */ | |
3138 | if (table->stack_mc_count >= 0) { | |
3139 | for (i = 0; i < table->stack_mc_count; i++) { | |
3140 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3141 | EFX_FILTER_FLAG_RX_RSS | | |
3142 | EFX_FILTER_FLAG_RX_STACK, | |
3143 | 0); | |
3144 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | |
3145 | table->stack_mc_list[i].addr); | |
3146 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3147 | if (rc < 0) { | |
3148 | /* Fall back to multicast-promisc */ | |
3149 | while (i--) | |
3150 | efx_ef10_filter_remove_safe( | |
3151 | efx, EFX_FILTER_PRI_REQUIRED, | |
3152 | table->stack_mc_list[i].id); | |
3153 | table->stack_mc_count = -1; | |
3154 | break; | |
3155 | } | |
3156 | table->stack_mc_list[i].id = rc; | |
3157 | } | |
3158 | } | |
3159 | if (table->stack_mc_count < 0) { | |
3160 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, | |
3161 | EFX_FILTER_FLAG_RX_RSS | | |
3162 | EFX_FILTER_FLAG_RX_STACK, | |
3163 | 0); | |
3164 | efx_filter_set_mc_def(&spec); | |
3165 | rc = efx_ef10_filter_insert(efx, &spec, true); | |
3166 | if (rc < 0) { | |
3167 | WARN_ON(1); | |
3168 | table->stack_mc_count = 0; | |
3169 | } else { | |
3170 | table->stack_mc_list[0].id = rc; | |
3171 | } | |
3172 | } | |
3173 | ||
3174 | /* Remove filters that weren't renewed. Since nothing else | |
3175 | * changes the STACK_OLD flag or removes these filters, we | |
3176 | * don't need to hold the filter_lock while scanning for | |
3177 | * these filters. | |
3178 | */ | |
3179 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | |
3180 | if (ACCESS_ONCE(table->entry[i].spec) & | |
3181 | EFX_EF10_FILTER_FLAG_STACK_OLD) { | |
3182 | if (efx_ef10_filter_remove_internal(efx, | |
3183 | EFX_FILTER_PRI_REQUIRED, | |
3184 | i, true) < 0) | |
3185 | remove_failed = true; | |
3186 | } | |
3187 | } | |
3188 | WARN_ON(remove_failed); | |
3189 | } | |
3190 | ||
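| /* Bring the RX filters up to date with the net_device state, then | |
| * update the MAC settings via MCDI. | |
| */ | |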
3191 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) | |
3192 | { | |
3193 | efx_ef10_filter_sync_rx_mode(efx); | |
3194 | ||
3195 | return efx_mcdi_set_mac(efx); | |
3196 | } | |
3197 | ||
3198 | #ifdef CONFIG_SFC_MTD | |
3199 | ||
3200 | struct efx_ef10_nvram_type_info { | |
3201 | u16 type, type_mask; | |
3202 | u8 port; | |
3203 | const char *name; | |
3204 | }; | |
3205 | ||
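| /* Map NVRAM partition types, as reported by the MC, to MTD partition | |
| * names. type_mask lets a range of types (e.g. the PHY firmware | |
| * partitions) share one entry; port restricts per-port partitions to | |
| * the matching network port. | |
| */ | |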
3206 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { | |
3207 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, | |
3208 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, | |
3209 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, | |
3210 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, | |
3211 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, | |
3212 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, | |
3213 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, | |
3214 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, | |
3215 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, | |
3216 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, | |
3217 | }; | |
3218 | ||
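| /* Fill in an MTD partition description for one NVRAM partition type. | |
| * Returns -ENODEV for partitions that should not be exposed on this | |
| * port (unrecognised type, wrong port, or protected). | |
| */ | |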
3219 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |
3220 | struct efx_mcdi_mtd_partition *part, | |
3221 | unsigned int type) | |
3222 | { | |
3223 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); | |
3224 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); | |
3225 | const struct efx_ef10_nvram_type_info *info; | |
3226 | size_t size, erase_size, outlen; | |
3227 | bool protected; | |
3228 | int rc; | |
3229 | ||
3230 | for (info = efx_ef10_nvram_types; ; info++) { | |
3231 | if (info == | |
3232 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) | |
3233 | return -ENODEV; | |
3234 | if ((type & ~info->type_mask) == info->type) | |
3235 | break; | |
3236 | } | |
3237 | if (info->port != efx_port_num(efx)) | |
3238 | return -ENODEV; | |
3239 | ||
3240 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); | |
3241 | if (rc) | |
3242 | return rc; | |
3243 | if (protected) | |
3244 | return -ENODEV; /* hide protected partitions */ | |
3245 | ||
3246 | part->nvram_type = type; | |
3247 | ||
3248 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); | |
3249 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), | |
3250 | outbuf, sizeof(outbuf), &outlen); | |
3251 | if (rc) | |
3252 | return rc; | |
3253 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) | |
3254 | return -EIO; | |
3255 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & | |
3256 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) | |
3257 | part->fw_subtype = MCDI_DWORD(outbuf, | |
3258 | NVRAM_METADATA_OUT_SUBTYPE); | |
3259 | ||
3260 | part->common.dev_type_name = "EF10 NVRAM manager"; | |
3261 | part->common.type_name = info->name; | |
3262 | ||
3263 | part->common.mtd.type = MTD_NORFLASH; | |
3264 | part->common.mtd.flags = MTD_CAP_NORFLASH; | |
3265 | part->common.mtd.size = size; | |
3266 | part->common.mtd.erasesize = erase_size; | |
3267 | ||
3268 | return 0; | |
3269 | } | |
3270 | ||
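| /* Enumerate the NVRAM partitions reported by the MC and register an | |
| * MTD device for each partition we can expose. | |
| */ | |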
3271 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | |
3272 | { | |
3273 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | |
3274 | struct efx_mcdi_mtd_partition *parts; | |
3275 | size_t outlen, n_parts_total, i, n_parts; | |
3276 | unsigned int type; | |
3277 | int rc; | |
3278 | ||
3279 | ASSERT_RTNL(); | |
3280 | ||
3281 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); | |
3282 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, | |
3283 | outbuf, sizeof(outbuf), &outlen); | |
3284 | if (rc) | |
3285 | return rc; | |
3286 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) | |
3287 | return -EIO; | |
3288 | ||
3289 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); | |
3290 | if (n_parts_total > | |
3291 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) | |
3292 | return -EIO; | |
3293 | ||
3294 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); | |
3295 | if (!parts) | |
3296 | return -ENOMEM; | |
3297 | ||
3298 | n_parts = 0; | |
3299 | for (i = 0; i < n_parts_total; i++) { | |
3300 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, | |
3301 | i); | |
3302 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); | |
3303 | if (rc == 0) | |
3304 | n_parts++; | |
3305 | else if (rc != -ENODEV) | |
3306 | goto fail; | |
3307 | } | |
3308 | ||
3309 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); | |
3310 | fail: | |
3311 | if (rc) | |
3312 | kfree(parts); | |
3313 | return rc; | |
3314 | } | |
3315 | ||
3316 | #endif /* CONFIG_SFC_MTD */ | |
3317 | ||
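| /* Write the host time into the low word of the MC doorbell register, | |
| * for PTP time synchronisation with the MC. | |
| */ | |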
3318 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) | |
3319 | { | |
3320 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); | |
3321 | } | |
3322 | ||
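| /* NIC-type method table for Huntington A0, binding the generic driver | |
| * operations to the EF10/MCDI implementations above. | |
| */ | |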
3323 | const struct efx_nic_type efx_hunt_a0_nic_type = { | |
3324 | .mem_map_size = efx_ef10_mem_map_size, | |
3325 | .probe = efx_ef10_probe, | |
3326 | .remove = efx_ef10_remove, | |
3327 | .dimension_resources = efx_ef10_dimension_resources, | |
3328 | .init = efx_ef10_init_nic, | |
3329 | .fini = efx_port_dummy_op_void, | |
3330 | .map_reset_reason = efx_mcdi_map_reset_reason, | |
3331 | .map_reset_flags = efx_ef10_map_reset_flags, | |
3332 | .reset = efx_mcdi_reset, | |
3333 | .probe_port = efx_mcdi_port_probe, | |
3334 | .remove_port = efx_mcdi_port_remove, | |
3335 | .fini_dmaq = efx_ef10_fini_dmaq, | |
3336 | .describe_stats = efx_ef10_describe_stats, | |
3337 | .update_stats = efx_ef10_update_stats, | |
3338 | .start_stats = efx_mcdi_mac_start_stats, | |
3339 | .stop_stats = efx_mcdi_mac_stop_stats, | |
3340 | .set_id_led = efx_mcdi_set_id_led, | |
3341 | .push_irq_moderation = efx_ef10_push_irq_moderation, | |
3342 | .reconfigure_mac = efx_ef10_mac_reconfigure, | |
3343 | .check_mac_fault = efx_mcdi_mac_check_fault, | |
3344 | .reconfigure_port = efx_mcdi_port_reconfigure, | |
3345 | .get_wol = efx_ef10_get_wol, | |
3346 | .set_wol = efx_ef10_set_wol, | |
3347 | .resume_wol = efx_port_dummy_op_void, | |
3348 | /* TODO: test_chip */ | |
3349 | .test_nvram = efx_mcdi_nvram_test_all, | |
3350 | .mcdi_request = efx_ef10_mcdi_request, | |
3351 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | |
3352 | .mcdi_read_response = efx_ef10_mcdi_read_response, | |
3353 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | |
3354 | .irq_enable_master = efx_port_dummy_op_void, | |
3355 | .irq_test_generate = efx_ef10_irq_test_generate, | |
3356 | .irq_disable_non_ev = efx_port_dummy_op_void, | |
3357 | .irq_handle_msi = efx_ef10_msi_interrupt, | |
3358 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | |
3359 | .tx_probe = efx_ef10_tx_probe, | |
3360 | .tx_init = efx_ef10_tx_init, | |
3361 | .tx_remove = efx_ef10_tx_remove, | |
3362 | .tx_write = efx_ef10_tx_write, | |
3363 | .rx_push_indir_table = efx_ef10_rx_push_indir_table, | |
3364 | .rx_probe = efx_ef10_rx_probe, | |
3365 | .rx_init = efx_ef10_rx_init, | |
3366 | .rx_remove = efx_ef10_rx_remove, | |
3367 | .rx_write = efx_ef10_rx_write, | |
3368 | .rx_defer_refill = efx_ef10_rx_defer_refill, | |
3369 | .ev_probe = efx_ef10_ev_probe, | |
3370 | .ev_init = efx_ef10_ev_init, | |
3371 | .ev_fini = efx_ef10_ev_fini, | |
3372 | .ev_remove = efx_ef10_ev_remove, | |
3373 | .ev_process = efx_ef10_ev_process, | |
3374 | .ev_read_ack = efx_ef10_ev_read_ack, | |
3375 | .ev_test_generate = efx_ef10_ev_test_generate, | |
3376 | .filter_table_probe = efx_ef10_filter_table_probe, | |
3377 | .filter_table_restore = efx_ef10_filter_table_restore, | |
3378 | .filter_table_remove = efx_ef10_filter_table_remove, | |
3379 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, | |
3380 | .filter_insert = efx_ef10_filter_insert, | |
3381 | .filter_remove_safe = efx_ef10_filter_remove_safe, | |
3382 | .filter_get_safe = efx_ef10_filter_get_safe, | |
3383 | .filter_clear_rx = efx_ef10_filter_clear_rx, | |
3384 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, | |
3385 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, | |
3386 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, | |
3387 | #ifdef CONFIG_RFS_ACCEL | |
3388 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, | |
3389 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, | |
3390 | #endif | |
3391 | #ifdef CONFIG_SFC_MTD | |
3392 | .mtd_probe = efx_ef10_mtd_probe, | |
3393 | .mtd_rename = efx_mcdi_mtd_rename, | |
3394 | .mtd_read = efx_mcdi_mtd_read, | |
3395 | .mtd_erase = efx_mcdi_mtd_erase, | |
3396 | .mtd_write = efx_mcdi_mtd_write, | |
3397 | .mtd_sync = efx_mcdi_mtd_sync, | |
3398 | #endif | |
3399 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | |
3400 | ||
3401 | .revision = EFX_REV_HUNT_A0, | |
3402 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | |
3403 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | |
3404 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | |
3405 | .can_rx_scatter = true, | |
3406 | .always_rx_scatter = true, | |
3407 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | |
3408 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, | |
3409 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3410 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | |
3411 | .mcdi_max_ver = 2, | |
3412 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, | |
3413 | }; |