// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

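/* Example (not from this file): both parameters above use permission 0444,
 * so they are read-only once loaded and must be given at module load time,
 * e.g.
 *	modprobe sfc interrupt_mode=1 rss_cpus=4
 */
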
static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

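/* Unlike interrupt_mode and rss_cpus, these thresholds are 0644 and can be
 * tuned at runtime through the standard module_param sysfs layout, e.g.
 * (illustrative value):
 *	echo 12000 > /sys/module/sfc/parameters/irq_adapt_high_thresh
 */
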
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/***************
 * Housekeeping
 ***************/

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

/*************
 * INTERRUPTS
 *************/

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}
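		/* count is now one per physical core: each iteration above
		 * marks the whole sibling (hyperthread) set of the CPU it
		 * counts. E.g. (illustrative numbers) a 2-socket box with
		 * 8 cores/socket and 2 threads/core has 32 online CPUs but
		 * yields count == 16.
		 */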

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */

	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
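	/* Worked example (assuming EFX_TXQ_TYPES is 4): a system with 24
	 * possible CPUs reserves 24 XDP TX queues packed onto
	 * DIV_ROUND_UP(24, 4) == 6 XDP event queues/channels.
	 */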

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels + n_xdp_ev > max_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			  n_xdp_ev, n_channels, max_channels);
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_tx, n_xdp_ev);
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
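	/* Worked example (illustrative ring sizes): with rxq_entries == 1024
	 * and txq_entries == 1024, entries = roundup_pow_of_two(2176) = 4096,
	 * so eventq_mask == 4095.
	 */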

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}
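	/* Global TX queue numbering is dense per channel: e.g. (assuming
	 * EFX_TXQ_TYPES is 4) channel 2 owns queues 8..11.
	 */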

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! Clamp the
	 * requested mode between the most capable (max_interrupt_mode)
	 * and least capable (min_interrupt_mode) modes this NIC type
	 * supports.
	 */
	if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
			 efx->type->min_interrupt_mode)) {
		return -EIO;
	}
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
	/* Use the result of the first clamp here, not the raw module
	 * parameter, so the two assignments together really do clamp.
	 */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx->interrupt_mode);

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
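
/* With separate TX channels on a device named eth0 (illustrative), the names
 * above come out as "eth0-rx-0", "eth0-tx-0", "eth0-xdp-0", etc.; without
 * separation, plain "eth0-0", "eth0-1", ...
 */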

void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

int efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	int xdp_queue_number;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	xdp_queue_number = 0;
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);

			if (efx_channel_is_xdp_tx(channel) &&
			    xdp_queue_number < efx->xdp_tx_queue_count) {
				efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
				xdp_queue_number++;
			}
		}
	}
	return 0;
}

bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

/*************
 * START/STOP
 *************/

int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

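	/* A low score over the sampling window means this channel can afford
	 * less moderation (a shorter IRQ delay); a high score means it should
	 * back off. The thresholds are the irq_adapt_{low,high}_thresh module
	 * parameters defined at the top of this file, and efx_poll() calls
	 * this roughly once per 1000 interrupts when adaptive RX moderation
	 * is enabled.
	 */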
	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}