/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum length for an RX descriptor sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
			  - EFX_PAGE_IP_ALIGN)

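/* Layout of a page shared by two RX buffers, as assembled by
 * efx_init_rx_buffers() below whenever rx_dma_len <= EFX_RX_HALF_PAGE
 * (a sketch, for orientation only):
 *
 *	page + 0					struct efx_rx_page_state
 *	page + sizeof(state) + EFX_PAGE_IP_ALIGN	buffer 0 (rx_dma_len bytes)
 *	buffer 0 + PAGE_SIZE/2				buffer 1 (rx_dma_len bytes)
 */
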
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

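/* Read back the RX flow hash.  @eh points at the Ethernet header;
 * efx_rx_packet() has already advanced the buffer past any prefix written by
 * the NIC, so when a hash is supplied it occupies the four bytes immediately
 * before @eh, in little-endian byte order.
 */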
static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffer for each one.  Returns a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
					PAGE_SIZE << efx->rx_buffer_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		state = page_address(page);
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_dma_len;
		rx_buf->flags = 0;
		++rx_queue->added_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf,
				unsigned int used_len)
{
	if (rx_buf->page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->page);
		if (--state->refcnt == 0) {
			dma_unmap_page(&efx->pci_dev->dev,
				       state->dma_addr,
				       PAGE_SIZE << efx->rx_buffer_order,
				       DMA_FROM_DEVICE);
		} else if (used_len) {
			dma_sync_single_for_cpu(&efx->pci_dev->dev,
						rx_buf->dma_addr, used_len,
						DMA_FROM_DEVICE);
		}
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	/* The two buffers sharing a page differ only in the PAGE_SIZE/2 bit
	 * of their DMA address, so flipping it recovers the other buffer. */
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->page = rx_buf->page;
	new_buf->len = rx_buf->len;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	rx_buf->flags = 0;

	if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

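/* Timer callback for the slow-fill path.  When efx_init_rx_buffers() cannot
 * get memory, efx_schedule_slow_fill() arms the rx_queue->slow_fill timer;
 * when it fires we ask the NIC to raise a fill event, which brings NAPI (and
 * the fast-fill path above) back in to retry the refill.
 */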
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct page *page = rx_buf->page;
	struct sk_buff *skb;

	rx_buf->page = NULL;

	skb = napi_get_frags(napi);
	if (!skb) {
		put_page(page);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(eh);

	skb_fill_page_desc(skb, 0, page, rx_buf->page_offset, rx_buf->len);

	skb->len = rx_buf->len;
	skb->data_len = rx_buf->len;
	skb->truesize += rx_buf->len;
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);

	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around a struct page. */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

	skb->len = rx_buf->len;
	skb->truesize = rx_buf->len + sizeof(struct sk_buff);
	memcpy(skb->data, eh, hdr_len);
	skb->tail += hdr_len;

	/* Append the remaining page onto the frag list */
	if (rx_buf->len > hdr_len) {
		skb->data_len = skb->len - hdr_len;
		skb_fill_page_desc(skb, 0, rx_buf->page,
				   rx_buf->page_offset + hdr_len,
				   skb->data_len);
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		skb->data_len = 0;
	}

	/* Ownership has transferred from the rx_buf to skb */
	rx_buf->page = NULL;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

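/* Handle a received packet.  First half: runs from the event handler and
 * touches only descriptor and buffer metadata.  The buffer is parked in
 * channel->rx_pkt and passed to __efx_rx_packet() by a later call to
 * efx_rx_flush_packet(), giving the prefetch issued below time to pull the
 * packet headers into cache.
 */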
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release and/or sync DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf, len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	/* Skip past any flow hash prefix written by the NIC, so that the
	 * buffer now starts at the Ethernet header.
	 */
	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
	rx_buf->len = len - efx->type->rx_buffer_hash_size;

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
out:
	efx_rx_flush_packet(channel);
	channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(channel->efx, rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (!channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers; NB start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

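/* rx_refill_threshold is a percentage of the ring's maximum fill level:
 * efx_init_rx_queue() clamps it to 100 and converts it into the fast-fill
 * trigger, while 0 (the default) selects the maximum trigger level of
 * max_fill - EFX_RX_BATCH.  As an illustration only: with 512 ring entries,
 * max_fill is 510, so a threshold of 50 gives a trigger level of 255.
 */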
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");