1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
* more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
29 #include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
34 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
35 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
36 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
37 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
41 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
42 #define I40E_FD_CLEAN_DELAY 10
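/* I40E_FD_CLEAN_DELAY bounds the descriptor-wait loop in
 * i40e_program_fdir_filter() below: with msleep_interruptible(1) per
 * iteration it works out to roughly a 10ms upper bound on waiting for
 * two free descriptors.
 */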
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
50 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add)
{
53 struct i40e_filter_program_desc *fdir_desc;
54 struct i40e_tx_buffer *tx_buf, *first;
55 struct i40e_tx_desc *tx_desc;
56 struct i40e_ring *tx_ring;
unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
u32 td_cmd = 0;
u16 delay = 0;
u16 i;
/* find existing FDIR VSI */
vsi = NULL;
for (i = 0; i < pf->num_alloc_vsi; i++)
	if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
		vsi = pf->vsi[i];
if (!vsi)
	return -ENOENT;

tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
76 /* we need two descriptors to add/del a filter and we can wait */
do {
	if (I40E_DESC_UNUSED(tx_ring) > 1)
		break;
	msleep_interruptible(1);
	delay++;
} while (delay < I40E_FD_CLEAN_DELAY);

if (!(I40E_DESC_UNUSED(tx_ring) > 1))
	return -EAGAIN;
87 dma = dma_map_single(dev, raw_packet,
88 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
	goto dma_fail;
92 /* grab the next descriptor */
93 i = tx_ring->next_to_use;
94 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
95 first = &tx_ring->tx_bi[i];
96 memset(first, 0, sizeof(struct i40e_tx_buffer));
98 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
100 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
101 I40E_TXD_FLTR_QW0_QINDEX_MASK;
103 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
104 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
106 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
107 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
109 /* Use LAN VSI Id if not programmed by user */
110 if (fdir_data->dest_vsi == 0)
111 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
	I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
	fpt |= ((u32)fdir_data->dest_vsi <<
115 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
116 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
118 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
	dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
	dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
127 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
128 I40E_TXD_FLTR_QW1_DEST_MASK;
130 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
131 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
133 if (fdir_data->cnt_index != 0) {
134 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
135 dcc |= ((u32)fdir_data->cnt_index <<
136 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
	I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
140 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
141 fdir_desc->rsvd = cpu_to_le32(0);
142 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
143 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
145 /* Now program a dummy descriptor */
146 i = tx_ring->next_to_use;
147 tx_desc = I40E_TX_DESC(tx_ring, i);
148 tx_buf = &tx_ring->tx_bi[i];
150 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
152 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
154 /* record length, and DMA address */
155 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
156 dma_unmap_addr_set(tx_buf, dma, dma);
158 tx_desc->buffer_addr = cpu_to_le64(dma);
159 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
161 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
162 tx_buf->raw_buf = (void *)raw_packet;
164 tx_desc->cmd_type_offset_bsz =
165 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
167 /* set the timestamp */
168 tx_buf->time_stamp = jiffies;
170 /* Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.
 */
wmb();
175 /* Mark the data descriptor to be watched */
176 first->next_to_watch = tx_desc;
writel(tx_ring->next_to_use, tx_ring->tail);
return 0;

dma_fail:
return -1;
}
185 #define IP_HEADER_OFFSET 14
186 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
188 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
189 * @vsi: pointer to the targeted VSI
190 * @fd_data: the flow director data required for the FDir descriptor
191 * @add: true adds a filter, false removes it
193 * Returns 0 if the filters were successfully added or removed
195 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
196 struct i40e_fdir_filter *fd_data,
199 struct i40e_pf *pf = vsi->back;
205 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
206 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
207 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
209 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
212 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
214 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
215 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
216 + sizeof(struct iphdr));
218 ip->daddr = fd_data->dst_ip[0];
219 udp->dest = fd_data->dst_port;
220 ip->saddr = fd_data->src_ip[0];
221 udp->source = fd_data->src_port;
223 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
224 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
226 dev_info(&pf->pdev->dev,
227 "Filter command send failed for PCTYPE %d (ret = %d)\n",
228 fd_data->pctype, ret);
231 dev_info(&pf->pdev->dev,
232 "Filter OK for PCTYPE %d (ret = %d)\n",
233 fd_data->pctype, ret);
236 return err ? -EOPNOTSUPP : 0;
239 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
241 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
242 * @vsi: pointer to the targeted VSI
243 * @fd_data: the flow director data required for the FDir descriptor
244 * @add: true adds a filter, false removes it
246 * Returns 0 if the filters were successfully added or removed
248 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
249 struct i40e_fdir_filter *fd_data,
252 struct i40e_pf *pf = vsi->back;
259 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
260 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
261 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
262 0x0, 0x72, 0, 0, 0, 0};
264 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
267 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
269 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
270 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
271 + sizeof(struct iphdr));
273 ip->daddr = fd_data->dst_ip[0];
274 tcp->dest = fd_data->dst_port;
275 ip->saddr = fd_data->src_ip[0];
276 tcp->source = fd_data->src_port;
279 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
280 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
281 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
285 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
286 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
289 dev_info(&pf->pdev->dev,
290 "Filter command send failed for PCTYPE %d (ret = %d)\n",
291 fd_data->pctype, ret);
294 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
295 fd_data->pctype, ret);
298 return err ? -EOPNOTSUPP : 0;
302 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
303 * a specific flow spec
304 * @vsi: pointer to the targeted VSI
305 * @fd_data: the flow director data required for the FDir descriptor
306 * @raw_packet: the pre-allocated packet buffer for FDir
307 * @add: true adds a filter, false removes it
309 * Always returns -EOPNOTSUPP
311 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
struct i40e_fdir_filter *fd_data,
bool add)
{
	return -EOPNOTSUPP;
}
318 #define I40E_IP_DUMMY_PACKET_LEN 34
320 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
321 * a specific flow spec
322 * @vsi: pointer to the targeted VSI
323 * @fd_data: the flow director data required for the FDir descriptor
324 * @add: true adds a filter, false removes it
326 * Returns 0 if the filters were successfully added or removed
328 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
329 struct i40e_fdir_filter *fd_data,
332 struct i40e_pf *pf = vsi->back;
338 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
339 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
342 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
343 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
344 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
347 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
348 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
350 ip->saddr = fd_data->src_ip[0];
351 ip->daddr = fd_data->dst_ip[0];
355 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
358 dev_info(&pf->pdev->dev,
359 "Filter command send failed for PCTYPE %d (ret = %d)\n",
360 fd_data->pctype, ret);
363 dev_info(&pf->pdev->dev,
364 "Filter OK for PCTYPE %d (ret = %d)\n",
365 fd_data->pctype, ret);
369 return err ? -EOPNOTSUPP : 0;
373 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
374 * @vsi: pointer to the targeted VSI
 * @input: flow director filter data to add or delete
376 * @add: true adds a filter, false removes it
379 int i40e_add_del_fdir(struct i40e_vsi *vsi,
380 struct i40e_fdir_filter *input, bool add)
382 struct i40e_pf *pf = vsi->back;
385 switch (input->flow_type & ~FLOW_EXT) {
387 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
390 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
393 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
396 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
399 switch (input->ip4_proto) {
401 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
404 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
407 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
410 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
415 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
420 /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
425 * i40e_fd_handle_status - check the Programming Status for FD
426 * @rx_ring: the Rx ring for this descriptor
427 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
428 * @prog_id: the id originally used for programming
430 * This is used to verify if the FD programming or invalidation
431 * requested by SW to the HW is successful or not and take actions accordingly.
433 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
434 union i40e_rx_desc *rx_desc, u8 prog_id)
436 struct i40e_pf *pf = rx_ring->vsi->back;
437 struct pci_dev *pdev = pf->pdev;
438 u32 fcnt_prog, fcnt_avail;
442 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
443 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
444 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
446 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
447 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
448 rx_desc->wb.qword0.hi_dword.fd_id);
450 /* filter programming failed most likely due to table full */
451 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
452 fcnt_avail = pf->fdir_pf_filter_count;
453 /* If ATR is running fcnt_prog can quickly change,
454 * if we are very close to full, it makes sense to disable
 * FD ATR/SB and then re-enable it when there is room.
 */
457 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
458 /* Turn off ATR first */
459 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
460 !(pf->auto_disable_flags &
461 I40E_FLAG_FD_ATR_ENABLED)) {
462 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
463 pf->auto_disable_flags |=
464 I40E_FLAG_FD_ATR_ENABLED;
465 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
466 } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
467 !(pf->auto_disable_flags &
468 I40E_FLAG_FD_SB_ENABLED)) {
469 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
470 pf->auto_disable_flags |=
471 I40E_FLAG_FD_SB_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
475 dev_info(&pdev->dev, "FD filter programming error\n");
478 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
479 if (I40E_DEBUG_FD & pf->hw.debug_mask)
480 dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
481 rx_desc->wb.qword0.hi_dword.fd_id);
486 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
487 * @ring: the ring that owns the buffer
488 * @tx_buffer: the buffer to free
490 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
491 struct i40e_tx_buffer *tx_buffer)
493 if (tx_buffer->skb) {
494 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
495 kfree(tx_buffer->raw_buf);
497 dev_kfree_skb_any(tx_buffer->skb);
499 if (dma_unmap_len(tx_buffer, len))
500 dma_unmap_single(ring->dev,
501 dma_unmap_addr(tx_buffer, dma),
502 dma_unmap_len(tx_buffer, len),
504 } else if (dma_unmap_len(tx_buffer, len)) {
505 dma_unmap_page(ring->dev,
506 dma_unmap_addr(tx_buffer, dma),
507 dma_unmap_len(tx_buffer, len),
510 tx_buffer->next_to_watch = NULL;
511 tx_buffer->skb = NULL;
512 dma_unmap_len_set(tx_buffer, len, 0);
513 /* tx_buffer must be completely set up in the transmit path */
517 * i40e_clean_tx_ring - Free any empty Tx buffers
518 * @tx_ring: ring to be cleaned
520 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
522 unsigned long bi_size;
525 /* ring already cleared, nothing to do */
529 /* Free all the Tx ring sk_buffs */
530 for (i = 0; i < tx_ring->count; i++)
531 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
533 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
534 memset(tx_ring->tx_bi, 0, bi_size);
536 /* Zero out the descriptor ring */
537 memset(tx_ring->desc, 0, tx_ring->size);
539 tx_ring->next_to_use = 0;
540 tx_ring->next_to_clean = 0;
542 if (!tx_ring->netdev)
545 /* cleanup Tx queue statistics */
546 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
547 tx_ring->queue_index));
551 * i40e_free_tx_resources - Free Tx resources per queue
552 * @tx_ring: Tx descriptor ring for a specific queue
554 * Free all transmit software resources
556 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
558 i40e_clean_tx_ring(tx_ring);
559 kfree(tx_ring->tx_bi);
560 tx_ring->tx_bi = NULL;
563 dma_free_coherent(tx_ring->dev, tx_ring->size,
564 tx_ring->desc, tx_ring->dma);
565 tx_ring->desc = NULL;
570 * i40e_get_tx_pending - how many tx descriptors not processed
571 * @tx_ring: the ring of descriptors
573 * Since there is no access to the ring head register
574 * in XL710, we need to use our local copies
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
			? ring->next_to_use
			: ring->next_to_use + ring->count);

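	/* Example: with count = 512, next_to_clean = 500 and next_to_use = 10,
	 * ntu is 10 + 512 = 522, so 522 - 500 = 22 descriptors are still
	 * waiting to be cleaned.
	 */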
581 return ntu - ring->next_to_clean;
585 * i40e_check_tx_hang - Is there a hang in the Tx queue
586 * @tx_ring: the ring of descriptors
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);
595 /* Check for a hung queue, but be thorough. This verifies
596 * that a transmit has been completed since the previous
597 * check AND there is at least one packet pending. The
598 * ARMED bit is set to indicate a potential hang. The
599 * bit is cleared if a pause frame is received to remove
600 * false hang detection due to PFC or 802.3x frames. By
601 * requiring this to fail twice we avoid races with
602 * PFC clearing the ARMED bit and conditions where we
603 * run the check_tx_hang logic with a transmit completion
 * pending but without time to complete it yet.
 */
if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
    tx_pending) {
	/* make sure it is true for two checks in a row */
	ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
			       &tx_ring->state);
} else {
	/* update completed stats and disarm the hang check */
	tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
	clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}

return ret;
}

/**
621 * i40e_get_head - Retrieve head from head writeback
622 * @tx_ring: tx ring to fetch head of
624 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
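	/* The head write-back location is the extra u32 placed just past the
	 * last descriptor when the ring was allocated (see
	 * i40e_setup_tx_descriptors), which is why index tx_ring->count is
	 * used here.
	 */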
629 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
return le32_to_cpu(*(volatile __le32 *)head);
}

/**
635 * i40e_clean_tx_irq - Reclaim resources after transmit completes
636 * @tx_ring: tx ring to clean
637 * @budget: how many cleans we're allowed
639 * Returns true if there's any budget left (e.g. the clean is finished)
641 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
643 u16 i = tx_ring->next_to_clean;
644 struct i40e_tx_buffer *tx_buf;
645 struct i40e_tx_desc *tx_head;
646 struct i40e_tx_desc *tx_desc;
647 unsigned int total_packets = 0;
648 unsigned int total_bytes = 0;
650 tx_buf = &tx_ring->tx_bi[i];
651 tx_desc = I40E_TX_DESC(tx_ring, i);
654 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
657 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
659 /* if next_to_watch is not set then there is no work pending */
663 /* prevent any other reads prior to eop_desc */
664 read_barrier_depends();
666 /* we have caught up to head, no work left to do */
667 if (tx_head == tx_desc)
670 /* clear next_to_watch to prevent false hangs */
671 tx_buf->next_to_watch = NULL;
673 /* update the statistics for this packet */
674 total_bytes += tx_buf->bytecount;
675 total_packets += tx_buf->gso_segs;
678 dev_kfree_skb_any(tx_buf->skb);
680 /* unmap skb header data */
681 dma_unmap_single(tx_ring->dev,
682 dma_unmap_addr(tx_buf, dma),
683 dma_unmap_len(tx_buf, len),
686 /* clear tx_buffer data */
688 dma_unmap_len_set(tx_buf, len, 0);
690 /* unmap remaining buffers */
691 while (tx_desc != eop_desc) {
698 tx_buf = tx_ring->tx_bi;
699 tx_desc = I40E_TX_DESC(tx_ring, 0);
702 /* unmap any remaining paged data */
703 if (dma_unmap_len(tx_buf, len)) {
704 dma_unmap_page(tx_ring->dev,
705 dma_unmap_addr(tx_buf, dma),
706 dma_unmap_len(tx_buf, len),
708 dma_unmap_len_set(tx_buf, len, 0);
712 /* move us one more past the eop_desc for start of next pkt */
718 tx_buf = tx_ring->tx_bi;
719 tx_desc = I40E_TX_DESC(tx_ring, 0);
722 /* update budget accounting */
724 } while (likely(budget));
727 tx_ring->next_to_clean = i;
728 u64_stats_update_begin(&tx_ring->syncp);
729 tx_ring->stats.bytes += total_bytes;
730 tx_ring->stats.packets += total_packets;
731 u64_stats_update_end(&tx_ring->syncp);
732 tx_ring->q_vector->tx.total_bytes += total_bytes;
733 tx_ring->q_vector->tx.total_packets += total_packets;
735 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
736 /* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
	 "  VSI                  <%d>\n"
	 "  Tx Queue             <%d>\n"
	 "  next_to_use          <%x>\n"
	 "  next_to_clean        <%x>\n",
	 tx_ring->vsi->seid,
	 tx_ring->queue_index,
	 tx_ring->next_to_use, i);
dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
	 "  time_stamp           <%lx>\n"
	 "  jiffies              <%lx>\n",
	 tx_ring->tx_bi[i].time_stamp, jiffies);
750 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
752 dev_info(tx_ring->dev,
753 "tx hang detected on queue %d, resetting adapter\n",
754 tx_ring->queue_index);
756 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
758 /* the adapter is about to reset, no point in enabling stuff */
762 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
763 tx_ring->queue_index),
764 total_packets, total_bytes);
766 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
767 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
768 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
769 /* Make sure that anybody stopping the queue after this
 * sees the new next_to_clean.
 */
smp_mb();
773 if (__netif_subqueue_stopped(tx_ring->netdev,
774 tx_ring->queue_index) &&
775 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
776 netif_wake_subqueue(tx_ring->netdev,
777 tx_ring->queue_index);
778 ++tx_ring->tx_stats.restart_queue;
786 * i40e_set_new_dynamic_itr - Find new ITR level
787 * @rc: structure containing ring performance data
789 * Stores a new ITR value based on packets and byte counts during
790 * the last interrupt. The advantage of per interrupt computation
791 * is faster updates and more accurate ITR for the current traffic
792 * pattern. Constants in this function were computed based on
793 * theoretical maximum wire speed and thresholds were set based on
794 * testing data as well as attempting to minimize response time
795 * while increasing bulk throughput.
797 static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
799 enum i40e_latency_range new_latency_range = rc->latency_range;
800 u32 new_itr = rc->itr;
803 if (rc->total_packets == 0 || !rc->itr)
806 /* simple throttlerate management
807 * 0-10MB/s lowest (100000 ints/s)
808 * 10-20MB/s low (20000 ints/s)
 * 20-1249MB/s bulk (8000 ints/s)
 */
bytes_per_int = rc->total_bytes / rc->itr;
switch (rc->latency_range) {
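/* bytes_per_int serves here as a rough throughput figure for the last
 * interval; the 10 and 20 thresholds in the cases below correspond to
 * the 10MB/s and 20MB/s boundaries listed in the table above.
 */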
813 case I40E_LOWEST_LATENCY:
814 if (bytes_per_int > 10)
815 new_latency_range = I40E_LOW_LATENCY;
817 case I40E_LOW_LATENCY:
818 if (bytes_per_int > 20)
819 new_latency_range = I40E_BULK_LATENCY;
820 else if (bytes_per_int <= 10)
821 new_latency_range = I40E_LOWEST_LATENCY;
823 case I40E_BULK_LATENCY:
824 if (bytes_per_int <= 20)
825 rc->latency_range = I40E_LOW_LATENCY;
829 switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
	new_itr = I40E_ITR_100K;
	break;
case I40E_LOW_LATENCY:
	new_itr = I40E_ITR_20K;
	break;
case I40E_BULK_LATENCY:
	new_itr = I40E_ITR_8K;
	break;
default:
	break;
}
843 if (new_itr != rc->itr) {
844 /* do an exponential smoothing */
845 new_itr = (10 * new_itr * rc->itr) /
846 ((9 * new_itr) + rc->itr);
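	/* In interrupt-rate terms this works out to
	 * 1/result = 0.9 * (1/rc->itr) + 0.1 * (1/new_itr), i.e. a 90/10
	 * weighting of the old and new interrupt rates.
	 */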
847 rc->itr = new_itr & I40E_MAX_ITR;
851 rc->total_packets = 0;
855 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
856 * @q_vector: the vector to adjust
858 static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
860 u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
861 struct i40e_hw *hw = &q_vector->vsi->back->hw;
865 reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
866 old_itr = q_vector->rx.itr;
867 i40e_set_new_dynamic_itr(&q_vector->rx);
868 if (old_itr != q_vector->rx.itr)
869 wr32(hw, reg_addr, q_vector->rx.itr);
871 reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
872 old_itr = q_vector->tx.itr;
873 i40e_set_new_dynamic_itr(&q_vector->tx);
874 if (old_itr != q_vector->tx.itr)
875 wr32(hw, reg_addr, q_vector->tx.itr);
879 * i40e_clean_programming_status - clean the programming status descriptor
880 * @rx_ring: the rx ring that has this descriptor
881 * @rx_desc: the rx descriptor written back by HW
883 * Flow director should handle FD_FILTER_STATUS to check its filter programming
884 * status being successful or not and take actions accordingly. FCoE should
885 * handle its context/filter programming/invalidation status and take actions.
888 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
889 union i40e_rx_desc *rx_desc)
894 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
895 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
896 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
898 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
899 i40e_fd_handle_status(rx_ring, rx_desc, id);
903 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
904 * @tx_ring: the tx ring to set up
906 * Return 0 on success, negative on error
908 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
910 struct device *dev = tx_ring->dev;
916 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
917 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
921 /* round up to nearest 4K */
922 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
923 /* add u32 for head writeback, align after this takes care of
 * guaranteeing this is at least one cache line in size
 */
926 tx_ring->size += sizeof(u32);
927 tx_ring->size = ALIGN(tx_ring->size, 4096);
928 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
929 &tx_ring->dma, GFP_KERNEL);
930 if (!tx_ring->desc) {
931 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
936 tx_ring->next_to_use = 0;
937 tx_ring->next_to_clean = 0;
941 kfree(tx_ring->tx_bi);
942 tx_ring->tx_bi = NULL;
947 * i40e_clean_rx_ring - Free Rx buffers
948 * @rx_ring: ring to be cleaned
950 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
952 struct device *dev = rx_ring->dev;
953 struct i40e_rx_buffer *rx_bi;
954 unsigned long bi_size;
957 /* ring already cleared, nothing to do */
961 /* Free all the Rx ring sk_buffs */
962 for (i = 0; i < rx_ring->count; i++) {
963 rx_bi = &rx_ring->rx_bi[i];
965 dma_unmap_single(dev,
972 dev_kfree_skb(rx_bi->skb);
976 if (rx_bi->page_dma) {
983 __free_page(rx_bi->page);
985 rx_bi->page_offset = 0;
989 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
990 memset(rx_ring->rx_bi, 0, bi_size);
992 /* Zero out the descriptor ring */
993 memset(rx_ring->desc, 0, rx_ring->size);
995 rx_ring->next_to_clean = 0;
996 rx_ring->next_to_use = 0;
1000 * i40e_free_rx_resources - Free Rx resources
1001 * @rx_ring: ring to clean the resources from
1003 * Free all receive software resources
1005 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1007 i40e_clean_rx_ring(rx_ring);
1008 kfree(rx_ring->rx_bi);
1009 rx_ring->rx_bi = NULL;
1011 if (rx_ring->desc) {
1012 dma_free_coherent(rx_ring->dev, rx_ring->size,
1013 rx_ring->desc, rx_ring->dma);
1014 rx_ring->desc = NULL;
1019 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1020 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1022 * Returns 0 on success, negative on failure
1024 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1026 struct device *dev = rx_ring->dev;
1029 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1030 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1031 if (!rx_ring->rx_bi)
1034 /* Round up to nearest 4K */
1035 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1036 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1037 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1038 rx_ring->size = ALIGN(rx_ring->size, 4096);
1039 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1040 &rx_ring->dma, GFP_KERNEL);
1042 if (!rx_ring->desc) {
1043 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1048 rx_ring->next_to_clean = 0;
1049 rx_ring->next_to_use = 0;
1053 kfree(rx_ring->rx_bi);
1054 rx_ring->rx_bi = NULL;
1059 * i40e_release_rx_desc - Store the new tail and head values
1060 * @rx_ring: ring to bump
1061 * @val: new head index
1063 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1065 rx_ring->next_to_use = val;
1066 /* Force memory writes to complete before letting h/w
1067 * know there are new descriptors to fetch. (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */
wmb();
1072 writel(val, rx_ring->tail);
1076 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
1077 * @rx_ring: ring to place buffers on
1078 * @cleaned_count: number of buffers to replace
1080 void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1082 u16 i = rx_ring->next_to_use;
1083 union i40e_rx_desc *rx_desc;
1084 struct i40e_rx_buffer *bi;
1085 struct sk_buff *skb;
1087 /* do nothing if no valid netdev defined */
1088 if (!rx_ring->netdev || !cleaned_count)
1091 while (cleaned_count--) {
1092 rx_desc = I40E_RX_DESC(rx_ring, i);
1093 bi = &rx_ring->rx_bi[i];
1097 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1098 rx_ring->rx_buf_len);
1100 rx_ring->rx_stats.alloc_buff_failed++;
1103 /* initialize queue mapping */
1104 skb_record_rx_queue(skb, rx_ring->queue_index);
1109 bi->dma = dma_map_single(rx_ring->dev,
1111 rx_ring->rx_buf_len,
1113 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1114 rx_ring->rx_stats.alloc_buff_failed++;
1120 if (ring_is_ps_enabled(rx_ring)) {
1122 bi->page = alloc_page(GFP_ATOMIC);
1124 rx_ring->rx_stats.alloc_page_failed++;
1129 if (!bi->page_dma) {
1130 /* use a half page if we're re-using */
1131 bi->page_offset ^= PAGE_SIZE / 2;
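			/* Each page is treated as two half-page Rx buffers;
			 * flipping page_offset lets the same page typically be
			 * handed to the hardware a second time before it is
			 * released.
			 */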
1132 bi->page_dma = dma_map_page(rx_ring->dev,
1137 if (dma_mapping_error(rx_ring->dev,
1139 rx_ring->rx_stats.alloc_page_failed++;
1145 /* Refresh the desc even if buffer_addrs didn't change
 * because each write-back erases this info.
 */
1148 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1149 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1151 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1152 rx_desc->read.hdr_addr = 0;
1155 if (i == rx_ring->count)
1160 if (rx_ring->next_to_use != i)
1161 i40e_release_rx_desc(rx_ring, i);
1165 * i40e_receive_skb - Send a completed packet up the stack
1166 * @rx_ring: rx ring in play
1167 * @skb: packet to send up
1168 * @vlan_tag: vlan tag for packet
1170 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1171 struct sk_buff *skb, u16 vlan_tag)
1173 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1174 struct i40e_vsi *vsi = rx_ring->vsi;
1175 u64 flags = vsi->back->flags;
1177 if (vlan_tag & VLAN_VID_MASK)
1178 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1180 if (flags & I40E_FLAG_IN_NETPOLL)
1183 napi_gro_receive(&q_vector->napi, skb);
1187 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1188 * @vsi: the VSI we care about
1189 * @skb: skb currently being received and modified
1190 * @rx_status: status value of last descriptor in packet
1191 * @rx_error: error value of last descriptor in packet
1192 * @rx_ptype: ptype value of last descriptor in packet
1194 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1195 struct sk_buff *skb,
1200 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1201 bool ipv4 = false, ipv6 = false;
1202 bool ipv4_tunnel, ipv6_tunnel;
1207 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1208 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1209 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1210 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
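	/* A ptype inside the GRENAT ranges above indicates the hardware
	 * classified the frame as a tunneled packet (GRE/VXLAN style); this
	 * drives the outer-UDP checksum handling further down.
	 */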
1212 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
1213 skb->ip_summed = CHECKSUM_NONE;
1215 /* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
	return;
1219 /* did the hardware decode the packet and checksum? */
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
	return;
1223 /* both known and outer_ip must be set for the below code to work */
if (!(decoded.known && decoded.outer_ip))
	return;
1227 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
	ipv4 = true;
else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
	ipv6 = true;
if (ipv4 &&
    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
		 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
	goto checksum_fail;
1239 /* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1242 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1243 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
	/* don't increment checksum err here, non-fatal err */
	return;
1247 /* there was some L4 error, count error and punt packet to the stack */
if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
	goto checksum_fail;
1251 /* handle packets that were not able to be checksummed due
 * to arrival speed, in this case the stack can compute
 * the checksum.
 */
if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
	return;
1258 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1259 * it in the driver, hardware does not do it for us.
1260 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1261 * so the total length of IPv4 header is IHL*4 bytes
 * The UDP_0 bit *may* be set if the *inner* header is UDP
 */
if (ipv4_tunnel &&
    (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1266 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1267 skb->transport_header = skb->mac_header +
1268 sizeof(struct ethhdr) +
1269 (ip_hdr(skb)->ihl * 4);
1271 /* Add 4 bytes for VLAN tagged packets */
1272 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1273 skb->protocol == htons(ETH_P_8021AD))
			? VLAN_HLEN : 0;

rx_udp_csum = udp_csum(skb);
iph = ip_hdr(skb);
1278 csum = csum_tcpudp_magic(
1279 iph->saddr, iph->daddr,
1280 (skb->len - skb_transport_offset(skb)),
1281 IPPROTO_UDP, rx_udp_csum);
1283 if (udp_hdr(skb)->check != csum)
		goto checksum_fail;
}

skb->ip_summed = CHECKSUM_UNNECESSARY;

return;

checksum_fail:
vsi->back->hw_csum_rx_error++;
}
1296 * i40e_rx_hash - returns the hash value from the Rx descriptor
1297 * @ring: descriptor ring
1298 * @rx_desc: specific descriptor
1300 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1301 union i40e_rx_desc *rx_desc)
1303 const __le64 rss_mask =
1304 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1305 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1307 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1308 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1309 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1315 * i40e_ptype_to_hash - get a hash type
1316 * @ptype: the ptype value from the descriptor
1318 * Returns a hash type to be used by skb_set_hash
1320 static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1322 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1325 return PKT_HASH_TYPE_NONE;
1327 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1328 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1329 return PKT_HASH_TYPE_L4;
1330 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1331 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1332 return PKT_HASH_TYPE_L3;
1334 return PKT_HASH_TYPE_L2;
1338 * i40e_clean_rx_irq - Reclaim resources after receive completes
1339 * @rx_ring: rx ring to clean
1340 * @budget: how many cleans we're allowed
1342 * Returns true if there's any budget left (e.g. the clean is finished)
1344 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1346 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1347 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1348 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1349 const int current_node = numa_node_id();
1350 struct i40e_vsi *vsi = rx_ring->vsi;
1351 u16 i = rx_ring->next_to_clean;
1352 union i40e_rx_desc *rx_desc;
1353 u32 rx_error, rx_status;
1360 rx_desc = I40E_RX_DESC(rx_ring, i);
1361 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1362 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1363 I40E_RXD_QW1_STATUS_SHIFT;
1365 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1366 union i40e_rx_desc *next_rxd;
1367 struct i40e_rx_buffer *rx_bi;
1368 struct sk_buff *skb;
1370 if (i40e_rx_is_programming_status(qword)) {
1371 i40e_clean_programming_status(rx_ring, rx_desc);
1372 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1375 rx_bi = &rx_ring->rx_bi[i];
1377 prefetch(skb->data);
1379 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1380 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1381 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1382 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1383 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1384 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1386 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1387 I40E_RXD_QW1_ERROR_SHIFT;
1388 rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1389 rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1391 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1392 I40E_RXD_QW1_PTYPE_SHIFT;
1395 /* This memory barrier is needed to keep us from reading
1396 * any other fields out of the rx_desc until we know the
1397 * STATUS_DD bit is set
1401 /* Get the header and possibly the whole packet
1402 * If this is an skb from previous receive dma will be 0
1408 len = I40E_RX_HDR_SIZE;
1410 len = rx_header_len;
1411 else if (rx_packet_len)
1412 len = rx_packet_len; /* 1buf/no split found */
1414 len = rx_header_len; /* split always mode */
1417 dma_unmap_single(rx_ring->dev,
1419 rx_ring->rx_buf_len,
1424 /* Get the rest of the data if this was a header split */
1425 if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
1427 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1432 skb->len += rx_packet_len;
1433 skb->data_len += rx_packet_len;
1434 skb->truesize += rx_packet_len;
1436 if ((page_count(rx_bi->page) == 1) &&
1437 (page_to_nid(rx_bi->page) == current_node))
1438 get_page(rx_bi->page);
1442 dma_unmap_page(rx_ring->dev,
1446 rx_bi->page_dma = 0;
1448 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1451 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1452 struct i40e_rx_buffer *next_buffer;
1454 next_buffer = &rx_ring->rx_bi[i];
1456 if (ring_is_ps_enabled(rx_ring)) {
1457 rx_bi->skb = next_buffer->skb;
1458 rx_bi->dma = next_buffer->dma;
1459 next_buffer->skb = skb;
1460 next_buffer->dma = 0;
1462 rx_ring->rx_stats.non_eop_descs++;
1466 /* ERR_MASK will only have valid bits if EOP set */
1467 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1468 dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
 * drop?
 */
goto next_desc;
}
1475 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1476 i40e_ptype_to_hash(rx_ptype));
1477 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1478 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1479 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1480 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1481 rx_ring->last_rx_timestamp = jiffies;
1484 /* probably a little skewed due to removing CRC */
1485 total_rx_bytes += skb->len;
1488 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1490 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1492 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1493 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1495 i40e_receive_skb(rx_ring, skb, vlan_tag);
1497 rx_ring->netdev->last_rx = jiffies;
budget--;
next_desc:
	rx_desc->wb.qword1.status_error_len = 0;
1505 /* return some buffers to hardware, one at a time is too slow */
1506 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1507 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1511 /* use prefetched values */
1513 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1514 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1515 I40E_RXD_QW1_STATUS_SHIFT;
1518 rx_ring->next_to_clean = i;
1519 u64_stats_update_begin(&rx_ring->syncp);
1520 rx_ring->stats.packets += total_rx_packets;
1521 rx_ring->stats.bytes += total_rx_bytes;
1522 u64_stats_update_end(&rx_ring->syncp);
1523 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1524 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1527 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1533 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1534 * @napi: napi struct with our devices info in it
1535 * @budget: amount of work driver is allowed to do this pass, in packets
1537 * This function will clean all queues associated with a q_vector.
1539 * Returns the amount of work done
1541 int i40e_napi_poll(struct napi_struct *napi, int budget)
1543 struct i40e_q_vector *q_vector =
1544 container_of(napi, struct i40e_q_vector, napi);
1545 struct i40e_vsi *vsi = q_vector->vsi;
1546 struct i40e_ring *ring;
1547 bool clean_complete = true;
1548 int budget_per_ring;
1550 if (test_bit(__I40E_DOWN, &vsi->state)) {
	napi_complete(napi);
	return 0;
}
1555 /* Since the actual Tx work is minimal, we can give the Tx a larger
1556 * budget and be more aggressive about cleaning up the Tx descriptors.
1558 i40e_for_each_ring(ring, q_vector->tx)
1559 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1561 /* We attempt to distribute budget to each Rx queue fairly, but don't
1562 * allow the budget to go below 1 because that would exit polling early.
1564 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
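	/* Example: a NAPI budget of 64 spread over 4 ring pairs gives each Rx
	 * ring a budget of 16 per poll.
	 */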
1566 i40e_for_each_ring(ring, q_vector->rx)
1567 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1569 /* If work not completed, return budget and polling will return */
if (!clean_complete)
	return budget;
1573 /* Work is done so exit the polling mode and re-enable the interrupt */
1574 napi_complete(napi);
1575 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
1576 ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1577 i40e_update_dynamic_itr(q_vector);
1579 if (!test_bit(__I40E_DOWN, &vsi->state)) {
1580 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1581 i40e_irq_dynamic_enable(vsi,
1582 q_vector->v_idx + vsi->base_vector);
1584 struct i40e_hw *hw = &vsi->back->hw;
1585 /* We re-enable the queue 0 cause, but
1586 * don't worry about dynamic_enable
1587 * because we left it on for the other
 * possible interrupts during napi
 */
1590 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
1591 qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1592 wr32(hw, I40E_QINT_RQCTL(0), qval);
1594 qval = rd32(hw, I40E_QINT_TQCTL(0));
1595 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1596 wr32(hw, I40E_QINT_TQCTL(0), qval);
1598 i40e_irq_dynamic_enable_icr0(vsi->back);
1606 * i40e_atr - Add a Flow Director ATR filter
1607 * @tx_ring: ring to add programming descriptor to
1609 * @flags: send flags
1610 * @protocol: wire protocol
1612 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1613 u32 flags, __be16 protocol)
1615 struct i40e_filter_program_desc *fdir_desc;
1616 struct i40e_pf *pf = tx_ring->vsi->back;
1618 unsigned char *network;
1620 struct ipv6hdr *ipv6;
1624 u32 flex_ptype, dtype_cmd;
1627 /* make sure ATR is enabled */
1628 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1631 /* if sampling is disabled do nothing */
1632 if (!tx_ring->atr_sample_rate)
1635 /* snag network header to get L4 type and address */
1636 hdr.network = skb_network_header(skb);
1638 /* Currently only IPv4/IPv6 with TCP is supported */
1639 if (protocol == htons(ETH_P_IP)) {
1640 if (hdr.ipv4->protocol != IPPROTO_TCP)
1643 /* access ihl as a u8 to avoid unaligned access on ia64 */
1644 hlen = (hdr.network[0] & 0x0F) << 2;
1645 } else if (protocol == htons(ETH_P_IPV6)) {
1646 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1649 hlen = sizeof(struct ipv6hdr);
1654 th = (struct tcphdr *)(hdr.network + hlen);
1656 /* Due to lack of space, no more new filters can be programmed */
1657 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1660 tx_ring->atr_count++;
1662 /* sample on all syn/fin/rst packets or once every atr sample rate */
if (!th->fin &&
    !th->syn &&
    !th->rst &&
    (tx_ring->atr_count < tx_ring->atr_sample_rate))
	return;
1669 tx_ring->atr_count = 0;
1671 /* grab the next descriptor */
1672 i = tx_ring->next_to_use;
1673 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1676 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1678 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1679 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1680 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1681 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1682 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1683 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1684 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1686 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1688 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
1690 dtype_cmd |= (th->fin || th->rst) ?
1691 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1692 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1693 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1694 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1696 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1697 I40E_TXD_FLTR_QW1_DEST_SHIFT;
1699 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1700 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1702 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
1704 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1705 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
1707 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1708 fdir_desc->rsvd = cpu_to_le32(0);
1709 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1710 fdir_desc->fd_id = cpu_to_le32(0);
1714 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1716 * @tx_ring: ring to send buffer on
1717 * @flags: the tx flags to be set
1719 * Checks the skb and set up correspondingly several generic transmit flags
1720 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 * Returns an error code to indicate the frame should be dropped on error,
 * otherwise returns 0 to indicate the flags have been set properly.
1725 static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1726 struct i40e_ring *tx_ring,
1729 __be16 protocol = skb->protocol;
1732 /* if we have a HW VLAN tag being added, default to the HW one */
1733 if (vlan_tx_tag_present(skb)) {
1734 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1735 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1736 /* else if it is a SW VLAN, check the next protocol and store the tag */
1737 } else if (protocol == htons(ETH_P_8021Q)) {
1738 struct vlan_hdr *vhdr, _vhdr;
1739 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1743 protocol = vhdr->h_vlan_encapsulated_proto;
1744 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
1745 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1748 /* Insert 802.1p priority into VLAN header */
1749 if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
1750 ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
1751 (skb->priority != TC_PRIO_CONTROL))) {
1752 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
1753 tx_flags |= (skb->priority & 0x7) <<
1754 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1755 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1756 struct vlan_ethhdr *vhdr;
1759 rc = skb_cow_head(skb, 0);
1762 vhdr = (struct vlan_ethhdr *)skb->data;
1763 vhdr->h_vlan_TCI = htons(tx_flags >>
1764 I40E_TX_FLAGS_VLAN_SHIFT);
1766 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1774 * i40e_tso - set up the tso context descriptor
1775 * @tx_ring: ptr to the ring to send
1776 * @skb: ptr to the skb we're sending
1777 * @tx_flags: the collected send information
1778 * @protocol: the send protocol
1779 * @hdr_len: ptr to the size of the packet header
1780 * @cd_tunneling: ptr to context descriptor bits
1782 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1784 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1785 u32 tx_flags, __be16 protocol, u8 *hdr_len,
1786 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1788 u32 cd_cmd, cd_tso_len, cd_mss;
1789 struct ipv6hdr *ipv6h;
1790 struct tcphdr *tcph;
1795 if (!skb_is_gso(skb))
1798 err = skb_cow_head(skb, 0);
1802 if (protocol == htons(ETH_P_IP)) {
1803 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1804 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1807 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1809 } else if (skb_is_gso_v6(skb)) {
1811 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1813 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1814 ipv6h->payload_len = 0;
1815 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
1819 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1820 *hdr_len = (skb->encapsulation
1821 ? (skb_inner_transport_header(skb) - skb->data)
1822 : skb_transport_offset(skb)) + l4len;
1824 /* find the field values */
1825 cd_cmd = I40E_TX_CTX_DESC_TSO;
1826 cd_tso_len = skb->len - *hdr_len;
1827 cd_mss = skb_shinfo(skb)->gso_size;
1828 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1830 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1831 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
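	/* Example: a 7254-byte TSO skb with 54 bytes of headers and an MSS of
	 * 1448 yields cd_tso_len = 7200, which the hardware segments into
	 * five frames of up to one MSS of payload each.
	 */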
1836 * i40e_tsyn - set up the tsyn context descriptor
1837 * @tx_ring: ptr to the ring to send
1838 * @skb: ptr to the skb we're sending
1839 * @tx_flags: the collected send information
1841 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
1843 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1844 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
1848 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1851 /* Tx timestamps cannot be sampled when doing TSO */
1852 if (tx_flags & I40E_TX_FLAGS_TSO)
1855 /* only timestamp the outbound packet if the user has requested it and
1856 * we are not already transmitting a packet to be timestamped
1858 pf = i40e_netdev_to_pf(tx_ring->netdev);
1859 if (pf->ptp_tx && !pf->ptp_tx_skb) {
1860 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1861 pf->ptp_tx_skb = skb_get(skb);
1866 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1867 I40E_TXD_CTX_QW1_CMD_SHIFT;
1873 * i40e_tx_enable_csum - Enable Tx checksum offloads
1875 * @tx_flags: Tx flags currently set
1876 * @td_cmd: Tx descriptor command bits to set
1877 * @td_offset: Tx descriptor header offsets to set
1878 * @cd_tunneling: ptr to context desc bits
1880 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1881 u32 *td_cmd, u32 *td_offset,
1882 struct i40e_ring *tx_ring,
1885 struct ipv6hdr *this_ipv6_hdr;
1886 unsigned int this_tcp_hdrlen;
1887 struct iphdr *this_ip_hdr;
1888 u32 network_hdr_len;
1891 if (skb->encapsulation) {
1892 network_hdr_len = skb_inner_network_header_len(skb);
1893 this_ip_hdr = inner_ip_hdr(skb);
1894 this_ipv6_hdr = inner_ipv6_hdr(skb);
1895 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1897 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1899 if (tx_flags & I40E_TX_FLAGS_TSO) {
1900 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1901 ip_hdr(skb)->check = 0;
1904 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1906 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1907 if (tx_flags & I40E_TX_FLAGS_TSO) {
1908 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1909 ip_hdr(skb)->check = 0;
1912 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1916 /* Now set the ctx descriptor fields */
1917 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1918 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1919 I40E_TXD_CTX_UDP_TUNNELING |
1920 ((skb_inner_network_offset(skb) -
1921 skb_transport_offset(skb)) >> 1) <<
1922 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1925 network_hdr_len = skb_network_header_len(skb);
1926 this_ip_hdr = ip_hdr(skb);
1927 this_ipv6_hdr = ipv6_hdr(skb);
1928 this_tcp_hdrlen = tcp_hdrlen(skb);
1931 /* Enable IP checksum offloads */
1932 if (tx_flags & I40E_TX_FLAGS_IPV4) {
1933 l4_hdr = this_ip_hdr->protocol;
1934 /* the stack computes the IP header already, the only time we
1935 * need the hardware to recompute it is in the case of TSO.
1937 if (tx_flags & I40E_TX_FLAGS_TSO) {
1938 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1939 this_ip_hdr->check = 0;
1941 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1943 /* Now set the td_offset for IP header length */
1944 *td_offset = (network_hdr_len >> 2) <<
1945 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1946 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1947 l4_hdr = this_ipv6_hdr->nexthdr;
1948 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1949 /* Now set the td_offset for IP header length */
1950 *td_offset = (network_hdr_len >> 2) <<
1951 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1953 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
1954 *td_offset |= (skb_network_offset(skb) >> 1) <<
1955 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1957 /* Enable L4 checksum offloads */
1960 /* enable checksum offloads */
1961 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1962 *td_offset |= (this_tcp_hdrlen >> 2) <<
1963 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1966 /* enable SCTP checksum offload */
1967 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1968 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
1969 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1972 /* enable UDP checksum offload */
1973 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1974 *td_offset |= (sizeof(struct udphdr) >> 2) <<
1975 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 * i40e_create_tx_ctx - Build the Tx context descriptor
1984 * @tx_ring: ring to create the descriptor on
1985 * @cd_type_cmd_tso_mss: Quad Word 1
1986 * @cd_tunneling: Quad Word 0 - bits 0-31
1987 * @cd_l2tag2: Quad Word 0 - bits 32-63
1989 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1990 const u64 cd_type_cmd_tso_mss,
1991 const u32 cd_tunneling, const u32 cd_l2tag2)
1993 struct i40e_tx_context_desc *context_desc;
1994 int i = tx_ring->next_to_use;
1996 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1997 !cd_tunneling && !cd_l2tag2)
2000 /* grab the next descriptor */
2001 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2004 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2006 /* cpu_to_le32 and assign to struct fields */
2007 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2008 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2009 context_desc->rsvd = cpu_to_le16(0);
2010 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2014 * i40e_tx_map - Build the Tx descriptor
2015 * @tx_ring: ring to send buffer on
2017 * @first: first buffer info buffer to use
2018 * @tx_flags: collected send information
2019 * @hdr_len: size of the packet header
2020 * @td_cmd: the command field in the descriptor
2021 * @td_offset: offset for checksum or crc
2023 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2024 struct i40e_tx_buffer *first, u32 tx_flags,
2025 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2027 unsigned int data_len = skb->data_len;
2028 unsigned int size = skb_headlen(skb);
2029 struct skb_frag_struct *frag;
2030 struct i40e_tx_buffer *tx_bi;
2031 struct i40e_tx_desc *tx_desc;
2032 u16 i = tx_ring->next_to_use;
2037 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2038 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2039 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2040 I40E_TX_FLAGS_VLAN_SHIFT;
2043 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2044 gso_segs = skb_shinfo(skb)->gso_segs;
2048 /* multiply data chunks by size of headers */
2049 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
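	/* For TSO the header is replayed in front of every segment, so the
	 * wire byte count is the payload plus one copy of the header per
	 * gso segment.
	 */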
2050 first->gso_segs = gso_segs;
2052 first->tx_flags = tx_flags;
2054 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2056 tx_desc = I40E_TX_DESC(tx_ring, i);
2059 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;
2063 /* record length, and DMA address */
2064 dma_unmap_len_set(tx_bi, len, size);
2065 dma_unmap_addr_set(tx_bi, dma, dma);
2067 tx_desc->buffer_addr = cpu_to_le64(dma);
2069 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2070 tx_desc->cmd_type_offset_bsz =
2071 build_ctob(td_cmd, td_offset,
2072 I40E_MAX_DATA_PER_TXD, td_tag);
2076 if (i == tx_ring->count) {
2077 tx_desc = I40E_TX_DESC(tx_ring, 0);
2081 dma += I40E_MAX_DATA_PER_TXD;
2082 size -= I40E_MAX_DATA_PER_TXD;
2084 tx_desc->buffer_addr = cpu_to_le64(dma);
2087 if (likely(!data_len))
2090 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2095 if (i == tx_ring->count) {
2096 tx_desc = I40E_TX_DESC(tx_ring, 0);
2100 size = skb_frag_size(frag);
2103 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2106 tx_bi = &tx_ring->tx_bi[i];
2109 /* Place RS bit on last descriptor of any packet that spans across the
 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
 */
2112 #define WB_STRIDE 0x3
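/* The test below sends a packet EOP-only when its first and last descriptors
 * sit inside the same four-descriptor (WB_STRIDE) group and the last one is
 * not the group's final slot; otherwise RS is also set, so descriptor
 * write-backs tend to be coalesced per 64-byte cache line.
 */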
2113 if (((i & WB_STRIDE) != WB_STRIDE) &&
2114 (first <= &tx_ring->tx_bi[i]) &&
2115 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2116 tx_desc->cmd_type_offset_bsz =
2117 build_ctob(td_cmd, td_offset, size, td_tag) |
2118 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2119 I40E_TXD_QW1_CMD_SHIFT);
2121 tx_desc->cmd_type_offset_bsz =
2122 build_ctob(td_cmd, td_offset, size, td_tag) |
2123 cpu_to_le64((u64)I40E_TXD_CMD <<
2124 I40E_TXD_QW1_CMD_SHIFT);
2127 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2128 tx_ring->queue_index),
2131 /* set the timestamp */
2132 first->time_stamp = jiffies;
2134 /* Force memory writes to complete before letting h/w
2135 * know there are new descriptors to fetch. (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */
wmb();
2141 /* set next_to_watch value indicating a packet is present */
2142 first->next_to_watch = tx_desc;
2145 if (i == tx_ring->count)
2148 tx_ring->next_to_use = i;
2150 /* notify HW of packet */
writel(i, tx_ring->tail);

return;

dma_error:
dev_info(tx_ring->dev, "TX DMA map failed\n");
2158 /* clear dma mappings for failed tx_bi map */
2160 tx_bi = &tx_ring->tx_bi[i];
2161 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2169 tx_ring->next_to_use = i;
2173 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2174 * @tx_ring: the ring to be checked
2175 * @size: the size buffer we want to assure is available
2177 * Returns -EBUSY if a stop is needed, else 0
2179 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2181 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
smp_mb();
2185 /* Check again in a case another CPU has just made room available. */
2186 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2189 /* A reprieve! - use start_queue because it doesn't call schedule */
2190 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2191 ++tx_ring->tx_stats.restart_queue;
2196 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2197 * @tx_ring: the ring to be checked
2198 * @size: the size buffer we want to assure is available
2200 * Returns 0 if stop is not needed
2202 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2204 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2206 return __i40e_maybe_stop_tx(tx_ring, size);
2210 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2212 * @tx_ring: ring to send buffer on
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
2218 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2219 struct i40e_ring *tx_ring)
2224 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2225 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2226 * + 4 desc gap to avoid the cache line where head is,
2227 * + 1 desc for context descriptor,
 * otherwise try next time
 */
2230 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2231 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2233 count += TXD_USE_COUNT(skb_headlen(skb));
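	/* The "count + 4 + 1" check below adds the four-descriptor gap and the
	 * single context descriptor called out in the comment above.
	 */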
2234 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2235 tx_ring->tx_stats.tx_busy++;
2242 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2244 * @tx_ring: ring to send buffer on
2246 * Returns NETDEV_TX_OK if sent, else an error code
2248 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2249 struct i40e_ring *tx_ring)
2251 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2252 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2253 struct i40e_tx_buffer *first;
2261 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2262 return NETDEV_TX_BUSY;
2264 /* prepare the xmit flags */
2265 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2268 /* obtain protocol of skb */
2269 protocol = skb->protocol;
2271 /* record the location of the first descriptor for this packet */
2272 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2274 /* setup IPv4/IPv6 offloads */
2275 if (protocol == htons(ETH_P_IP))
2276 tx_flags |= I40E_TX_FLAGS_IPV4;
2277 else if (protocol == htons(ETH_P_IPV6))
2278 tx_flags |= I40E_TX_FLAGS_IPV6;
2280 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
2281 &cd_type_cmd_tso_mss, &cd_tunneling);
2286 tx_flags |= I40E_TX_FLAGS_TSO;
2288 skb_tx_timestamp(skb);
2290 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2293 tx_flags |= I40E_TX_FLAGS_TSYN;
2295 /* always enable CRC insertion offload */
2296 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2298 /* Always offload the checksum, since it's in the data descriptor */
2299 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2300 tx_flags |= I40E_TX_FLAGS_CSUM;
2302 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
2303 tx_ring, &cd_tunneling);
2306 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2307 cd_tunneling, cd_l2tag2);
2309 /* Add Flow Director ATR if it's enabled.
2311 * NOTE: this must always be directly before the data descriptor.
2313 i40e_atr(tx_ring, skb, tx_flags, protocol);
2315 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2318 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2320 return NETDEV_TX_OK;
2323 dev_kfree_skb_any(skb);
2324 return NETDEV_TX_OK;
2328 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2330 * @netdev: network interface device structure
2332 * Returns NETDEV_TX_OK if sent, else an error code
2334 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2336 struct i40e_netdev_priv *np = netdev_priv(netdev);
2337 struct i40e_vsi *vsi = np->vsi;
2338 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
 * beyond this point
 */
2343 if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
2344 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
2345 return NETDEV_TX_OK;
2346 skb->len = I40E_MIN_TX_LEN;
2347 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
2350 return i40e_xmit_frame_ring(skb, tx_ring);