// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct ef4_loopback_payload {
	char pad[2]; /* Ensures ip is 4-byte aligned */
	struct_group_attr(packet, __packed,
		struct ethhdr header;
		struct iphdr ip;
		struct udphdr udp;
		__be16 iteration;
		char msg[64];
	);
} __packed __aligned(4);
#define EF4_LOOPBACK_PAYLOAD_LEN	\
		sizeof_field(struct ef4_loopback_payload, packet)
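/* Note that EF4_LOOPBACK_PAYLOAD_LEN counts only the on-wire "packet"
 * group; the two leading pad bytes exist purely for in-memory alignment
 * and are stripped before transmission.
 */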

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int ef4_interrupt_mode_max = EF4_INT_MODE_MAX;
static const char *const ef4_interrupt_mode_names[] = {
	[EF4_INT_MODE_MSIX]   = "MSI-X",
	[EF4_INT_MODE_MSI]    = "MSI",
	[EF4_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)

/**
 * struct ef4_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in ef4_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct ef4_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct ef4_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

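/* Check that the PHY is responsive, if the PHY operations provide an
 * aliveness test; otherwise the test is skipped and reports success.
 */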
static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	int rc = 0;

	if (efx->phy_op->test_alive) {
		rc = efx->phy_op->test_alive(efx);
		tests->phy_alive = rc ? -1 : 1;
	}

	return rc;
}

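/* Validate NVRAM contents via the NIC-type hook.  -EPERM is treated as
 * "not permitted" rather than a failure, and leaves the result unset.
 */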
static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int ef4_test_interrupts(struct ef4_nic *efx,
			       struct ef4_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = ef4_nic_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = ef4_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int ef4_test_eventq_irq(struct ef4_nic *efx,
			       struct ef4_self_tests *tests)
{
	struct ef4_channel *channel;
	unsigned int read_ptr[EF4_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);

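	/* Each channel owns one bit in the dma_pend, int_pend and napi_ran
	 * masks, which is why EF4_MAX_CHANNELS must not exceed BITS_PER_LONG
	 * (checked above).  Arm an event test on every channel, saving each
	 * event queue's read pointer so we can later tell whether NAPI ran.
	 */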
	ef4_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		ef4_nic_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		ef4_for_each_channel(channel, efx) {
			ef4_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (ef4_nic_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (ef4_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			ef4_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	ef4_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

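/* Run the PHY's own self-tests, serialised against MAC reconfiguration
 * by the MAC lock.  As with NVRAM, -EPERM means "not permitted" and is
 * not counted as a failure.
 */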
static int ef4_test_phy(struct ef4_nic *efx, struct ef4_self_tests *tests,
			unsigned flags)
{
	int rc;

	if (!efx->phy_op->run_tests)
		return 0;

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void ef4_loopback_rx_packet(struct ef4_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct ef4_loopback_payload received;
	struct ef4_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	memcpy(&received.packet, buf_ptr,
	       min_t(int, pkt_len, EF4_LOOPBACK_PAYLOAD_LEN));
	received.ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received.ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received.header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received.header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != EF4_LOOPBACK_PAYLOAD_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len,
			  (int)EF4_LOOPBACK_PAYLOAD_LEN, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received.ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received.msg, &payload->msg, sizeof(received.msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received.iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received.iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload.packet, EF4_LOOPBACK_PAYLOAD_LEN,
			       0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an ef4_selftest_state for a new iteration */
static void ef4_iterate_state(struct ef4_nic *efx)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct ef4_loopback_payload *payload = &state->payload;

	/* Initialise the layer II (Ethernet) header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) -
				    offsetof(struct ef4_loopback_payload, ip));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) -
				 offsetof(struct ef4_loopback_payload, udp));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

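/* Queue state->packet_count copies of the test payload on one TX queue.
 * The IP source address is varied per packet so that different RSS
 * vectors are exercised, and an extra skb reference is held so that TX
 * completion can be detected in ef4_end_loopback().
 */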
static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct ef4_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(EF4_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
		/* Strip off the leading padding */
		skb_pull(skb, offsetof(struct ef4_loopback_payload, header));
		/* Strip off the trailing padding */
		skb_trim(skb, EF4_LOOPBACK_PAYLOAD_LEN);

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = ef4_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

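/* Return true once every transmitted test packet has been received back
 * and validated by ef4_loopback_rx_packet().
 */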
static int ef4_poll_loopback(struct ef4_nic *efx)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

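/* Drop the extra skb references taken in ef4_begin_loopback(), count TX
 * completions and RX results, and record them in the test structure.
 * Returns -ETIMEDOUT if any TX completion or RX packet went missing.
 */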
static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
			    struct ef4_loopback_self_tests *lb_tests)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
ef4_test_loopback(struct ef4_tx_queue *tx_queue,
		  struct ef4_loopback_self_tests *lb_tests)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send: bursts of 1, 16 and
		 * 256 packets, capped at a third of the TX queue size */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d testing %s loopback with %d packets\n",
			  tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		ef4_iterate_state(efx);
		begin_rc = ef4_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!ef4_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			ef4_poll_loopback(efx);
		}

		end_rc = ef4_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
 * any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int ef4_wait_for_link(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

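/* Run the loopback test in every mode requested in loopback_modes,
 * reconfiguring the port into each mode in turn and testing every
 * enabled TX queue of the first TX-capable channel.
 */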
static int ef4_test_loopbacks(struct ef4_nic *efx, struct ef4_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum ef4_loopback_mode mode;
	struct ef4_loopback_state *state;
	struct ef4_channel *channel =
		ef4_get_channel(efx, efx->tx_channel_offset);
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __ef4_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = ef4_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->queue &
					       EF4_TXQ_TYPE_OFFLOAD);
			rc = ef4_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
		 unsigned flags)
{
	enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	ef4_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = ef4_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return ef4_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	ef4_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = ef4_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}

void ef4_selftest_async_start(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_nic_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void ef4_selftest_async_cancel(struct ef4_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

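/* Delayed-work handler paired with ef4_selftest_async_start(): report,
 * per channel, whether the armed event test raised an interrupt.
 */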
void ef4_selftest_async_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic,
					   selftest_work.work);
	struct ef4_channel *channel;
	int cpu;

	ef4_for_each_channel(channel, efx) {
		cpu = ef4_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}