/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use, the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

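/* Worked example of the hysteresis above: every packet that GRO merges or
 * holds adds RX_ALLOC_FACTOR_GRO (+1) to rx_alloc_level, while every packet
 * delivered as an ordinary skb adds RX_ALLOC_FACTOR_SKB (-2).  Once the
 * level (clamped to 0..RX_ALLOC_LEVEL_MAX) exceeds RX_ALLOC_LEVEL_GRO the
 * channel switches to page-based allocation, and non-GRO traffic pulls it
 * back towards skb allocation twice as fast.
 */
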
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                             struct efx_rx_buffer *buf)
{
        return buf->page_offset + efx->type->rx_buffer_hash_size;
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
        if (buf->flags & EFX_RX_BUF_PAGE)
                return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
        else
                return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one.  Return a negative error code or 0
 * on success.  May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
        struct sk_buff *skb;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);

                rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
                if (unlikely(!skb))
                        return -ENOMEM;

                /* Adjust the SKB for padding */
                skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->flags = 0;

                rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
                                                  skb->data, rx_buf->len,
                                                  DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                               rx_buf->dma_addr))) {
                        dev_kfree_skb_any(skb);
                        rx_buf->u.skb = NULL;
                        return -EIO;
                }

                ++rx_queue->added_count;
                ++rx_queue->alloc_skb_count;
        }

        return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one.  Return a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
                                        efx_rx_buf_size(efx),
                                        DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                state = page_address(page);
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

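                /* Each page begins with a struct efx_rx_page_state, followed
                 * by one RX buffer, or two (one per half page) when the
                 * buffer fits in EFX_RX_HALF_PAGE.  state->refcnt counts the
                 * buffers still backed by this DMA mapping, so the page is
                 * only unmapped once the last of them has been consumed
                 * (see efx_unmap_rx_buffer()).
                 */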
        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
                rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;

                if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf,
                                unsigned int used_len)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;

                state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        dma_unmap_page(&efx->pci_dev->dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       DMA_FROM_DEVICE);
                } else if (used_len) {
                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
                                                rx_buf->dma_addr, used_len,
                                                DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
                                 rx_buf->len, DMA_FROM_DEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                __free_pages(rx_buf->u.page, efx->rx_buffer_order);
                rx_buf->u.page = NULL;
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dev_kfree_skb_any(rx_buf->u.skb);
                rx_buf->u.skb = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_page_state *state = page_address(rx_buf->u.page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;

        /* +1 because efx_rx_packet() incremented removed_count. +1 because
         * we'd like to insert an additional descriptor whilst leaving
         * EFX_RXD_HEAD_ROOM for the non-recycle path */
        fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
        if (unlikely(fill_level > rx_queue->max_fill)) {
                /* We could place "state" on a list, and drain the list in
                 * efx_fast_push_rx_descriptors(). For now, this will do. */
                return;
        }

        ++state->refcnt;
        get_page(rx_buf->u.page);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
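        /* The two buffers sharing a page sit exactly half a page apart, so
         * XORing with PAGE_SIZE/2 gives the DMA address of the other half.
         */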
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
        new_buf->u.page = rx_buf->u.page;
        new_buf->len = rx_buf->len;
        new_buf->flags = EFX_RX_BUF_PAGE;
        ++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *new_buf;
        unsigned index;

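        /* Keep only the PAGE flag; per-packet state such as
         * EFX_RX_PKT_CSUMMED or EFX_RX_PKT_DISCARD must not leak into the
         * recycled descriptor.
         */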
        rx_buf->flags &= EFX_RX_BUF_PAGE;

        if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
            efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
            page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);

        memcpy(new_buf, rx_buf, sizeof(*new_buf));
        rx_buf->u.page = NULL;
        ++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        unsigned fill_level;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d using %s allocation\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill,
                   channel->rx_alloc_push_pages ? "page" : "skb");

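        /* Refill in batches of EFX_RX_BATCH, stopping once less than one
         * full batch of free space remains.
         */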
        do {
                if (channel->rx_alloc_push_pages)
                        rc = efx_init_rx_buffers_page(rx_queue);
                else
                        rc = efx_init_rx_buffers_skb(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len, bool *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
                              const u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;

        if (rx_buf->flags & EFX_RX_BUF_PAGE) {
                struct efx_nic *efx = channel->efx;
                struct page *page = rx_buf->u.page;
                struct sk_buff *skb;

                rx_buf->u.page = NULL;

                skb = napi_get_frags(napi);
                if (!skb) {
                        put_page(page);
                        return;
                }

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                skb_fill_page_desc(skb, 0, page,
                                   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

                skb_record_rx_queue(skb, channel->rx_queue.core_index);

                gro_result = napi_gro_frags(napi);
        } else {
                struct sk_buff *skb = rx_buf->u.skb;

                EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
                rx_buf->u.skb = NULL;
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                gro_result = napi_gro_receive(napi, skb);
        }

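        /* Feed the result into the allocation hysteresis: GRO_NORMAL means
         * GRO passed the packet straight through, which counts towards skb
         * allocation; any other non-dropped result counts towards page
         * allocation and also nudges the interrupt moderation score.
         */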
        if (gro_result == GRO_NORMAL) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        } else if (gro_result != GRO_DROP) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
                channel->irq_mod_score += 2;
        }
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
        bool leak_packet = false;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x at %llx+%x %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (unsigned long long)rx_buf->dma_addr, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                if (unlikely(leak_packet))
                        channel->n_skbuff_leaks++;
                else
                        efx_recycle_rx_buffer(channel, rx_buf);

                /* Don't hold off the previous receive */
                rx_buf = NULL;
                goto out;
        }

        /* Release and/or sync DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf, len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_eh(efx, rx_buf));

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
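        /* Complete the packet pipelined from the previous call, then stash
         * this one on the channel so its headers have time to prefetch.
         */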
        if (channel->rx_pkt)
                __efx_rx_packet(channel, channel->rx_pkt);
        channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel,
                           struct efx_rx_buffer *rx_buf)
{
        struct sk_buff *skb;

        /* We now own the SKB */
        skb = rx_buf->u.skb;
        rx_buf->u.skb = NULL;

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);

        /* Record the rx_queue */
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        goto handled;

        /* Pass the packet up */
        netif_receive_skb(skb);

handled:
        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        u8 *eh = efx_rx_buf_eh(efx, rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }

        if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
                struct sk_buff *skb = rx_buf->u.skb;

                prefetch(skb_shinfo(skb));

                skb_reserve(skb, efx->type->rx_buffer_hash_size);
                skb_put(skb, rx_buf->len);

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                skb->protocol = eth_type_trans(skb, efx->net_dev);

                skb_record_rx_queue(skb, channel->rx_queue.core_index);
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

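        /* GRO can take page-backed buffers regardless of checksum state, but
         * skb-backed buffers only when the hardware checksum was good; other
         * packets, and channels with their own receive_skb handler, use the
         * normal delivery path.
         */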
        if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
            !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, eh);
        else
                efx_rx_deliver(channel, rx_buf);
}

void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        if (channel->type->receive_skb) {
                channel->rx_alloc_push_pages = false;
                return;
        }

        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger = max_fill - EFX_RX_BATCH;
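        /* The fast-fill trigger is the fill level below which the refill
         * path starts adding descriptors: rx_refill_threshold percent of
         * max_fill when that module parameter is set, capped (and
         * defaulting) to max_fill less one batch of descriptors.
         */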
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;

        /* Set up RX descriptor ring */
        rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* A flush failure might have left rx_queue->enabled */
        rx_queue->enabled = false;

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->ptr_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

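/* rx_alloc_method is registered with mode 0644, so it can be read and
 * changed at runtime through sysfs; rx_refill_threshold (mode 0444) is
 * read-only once the module has loaded.
 */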
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");