/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
i = 0;
}
+ tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
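/* For orientation: the libeth SQE types used above, paraphrased from
 * <net/libeth/tx.h> (the relative order is what the driver relies on;
 * the exact definition may differ between kernel versions):
 *
 *	enum libeth_sqe_type {
 *		LIBETH_SQE_EMPTY = 0,	type not set / nothing to clean
 *		LIBETH_SQE_CTX,		context descriptor slot
 *		LIBETH_SQE_SLAB,	kmalloc'ed buffer
 *		LIBETH_SQE_FRAG,	mapped fragment, unmap only
 *		LIBETH_SQE_SKB,		EOP buffer: unmap, count stats, free skb
 *	};
 *
 * EMPTY and CTX sort below every buffer-backed type, which is why the
 * cleaning paths can skip both with a single "type <= LIBETH_SQE_CTX" test.
 */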
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
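/* Worked example for the bytes computation above (hypothetical numbers):
 * an skb with skb->len = 2854, tso_hdr_len = 54 and MSS 1400 is segmented
 * into tso_segs = 2 wire packets, so bytes = 2854 + (2 - 1) * 54 = 2908,
 * i.e. 2 * (1400 + 54): the headers are charged once per segment, which is
 * what BQL and the queue stats need to see.
 */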
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
+ if (tx_buf->type != LIBETH_SQE_SKB)
  break;
-
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
- /* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
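/* libeth_tx_complete() from <net/libeth/tx.h> centralizes the per-buffer
 * work that the removed hunks above open-coded. A minimal sketch of the
 * semantics the callers rely on -- an approximation for illustration, not
 * the authoritative libeth source:
 */
static inline void idpf_example_tx_complete(struct libeth_sqe *sqe,
					    const struct libeth_cq_pp *cp)
{
	if (sqe->type <= LIBETH_SQE_CTX)	/* EMPTY/CTX: nothing mapped */
		return;

	if (dma_unmap_len(sqe, len)) {		/* head or paged fragment */
		dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
			       dma_unmap_len(sqe, len), DMA_TO_DEVICE);
		dma_unmap_len_set(sqe, len, 0);
	}

	if (sqe->type == LIBETH_SQE_SKB) {	/* EOP entry owns the skb */
		cp->ss->packets += sqe->packets;
		cp->ss->bytes += sqe->bytes;
		napi_consume_skb(sqe->skb, cp->napi);
	}

	sqe->type = LIBETH_SQE_EMPTY;		/* slot is reusable again */
}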
/* update budget only if we did something */
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
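/* The BQL accounting stays symmetric across the conversion: the xmit path
 * charges first->bytes via netdev_tx_sent_queue(), and the same value flows
 * back here through ss.bytes, letting __netif_txq_completed_wake() complete
 * the queued bytes and re-wake the queue once IDPF_DESC_UNUSED(tx_q)
 * reaches IDPF_TX_WAKE_THRESH.
 */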
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct libeth_sqe buf;
+};
+
+#define idpf_tx_buf_compl_tag(buf) (*(int *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(int);
+
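/* A sketch of what the two lines above buy, assuming libeth_sqe carries an
 * opaque "unsigned long priv" member for driver use: the macro aliases the
 * splitq completion tag into that field, and LIBETH_SQE_CHECK_PRIV(int)
 * expands to a compile-time guard roughly equivalent to
 *
 *	static_assert(sizeof(int) <= sizeof_field(struct libeth_sqe, priv));
 *
 * so the tag can ride along in the generic SQE without growing it.
 */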
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
}
}
-/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf)
-{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
-
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct libeth_sq_napi_stats ss = { };
struct idpf_buf_lifo *buf_stack;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
u16 i;
/* Buffers already cleared, nothing to do */
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
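/* Note that cp deliberately leaves .napi at 0 here: queue teardown runs in
 * process context, and napi_consume_skb(skb, 0) falls back to
 * dev_consume_skb_any(), so the same libeth_tx_complete() path is safe
 * outside of NAPI polling.
 */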
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
wake_up(&vport->sw_marker_wq);
}
-/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
-{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
*/
static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
struct idpf_tx_stash *stash;
struct hlist_node *tmp_buf;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
/* Buffer completion */
hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) !=
+ (int)compl_tag))
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
+ libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
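/* The explicit tag compare above is still needed even though the hash is
 * keyed by compl_tag: hash_for_each_possible_safe() only narrows the walk
 * to one bucket, and different tags may collide into it, so foreign
 * entries must be skipped rather than completed.
 */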
{
struct idpf_tx_stash *stash;
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
return 0;
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
/* Store buffer params in shadow buffer */
stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
+ stash->buf.bytes = tx_buf->bytes;
+ stash->buf.packets = tx_buf->packets;
+ stash->buf.type = tx_buf->type;
dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
+ idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
/* Add buffer to buf_hash table to be freed later */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ idpf_tx_buf_compl_tag(&stash->buf));
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf->type = LIBETH_SQE_EMPTY;
return 0;
}
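/* Lifecycle of a stashed buffer in flow-scheduling mode, pieced together
 * from the hunks above (paraphrased, for orientation):
 *
 *	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
 *	stash->buf = <copy of the libeth_sqe fields, incl. the tag in priv>;
 *	hash_add(txq->stash->sched_buf_hash, &stash->hlist, tag);
 *	tx_buf->type = LIBETH_SQE_EMPTY;	(ring slot handed back)
 *	...later, on the matching completion...
 *	idpf_tx_clean_stashed_bufs(txq, tag, ...):
 *		libeth_tx_complete(&stash->buf, &cp);
 *		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
 */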
*/
static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
tx_desc = &tx_q->flex_tx[ntc];
- * invalid completion tag since no buffer was used. We can
- * skip this descriptor since there is no buffer to clean.
+ * LIBETH_SQE_CTX type since no buffer was used. We can skip
+ * this descriptor since there is no buffer to clean.
  */
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
-
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ eop_desc = &tx_q->flex_tx[tx_buf->rs_idx];
if (descs_only) {
if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
}
}
} else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
tx_desc, tx_buf);
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
}
* this completion tag.
*/
static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
u16 ntc = txq->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
u16 num_descs_cleaned = 0;
u16 orig_idx = idx;
tx_buf = &txq->tx_buf[idx];
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
+ return false;
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ while (idpf_tx_buf_compl_tag(tx_buf) == (int)compl_tag) {
+ libeth_tx_complete(tx_buf, &cp);
num_descs_cleaned++;
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
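/* Hypothetical illustration of the tag split driving the loop above: with
 * compl_tag_bufid_m == 0x03FF, a completion tag of 0x0C05 carries generation
 * bits 0x0C00 and buffer index 0x005, so cleaning starts at txq->tx_buf[5]
 * and walks consecutive entries for as long as they carry the full 0x0C05
 * tag in their priv field.
 */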
*/
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 compl_tag;
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
+ struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
struct idpf_tx_buf *tx_buf;
tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
if (tx_buf == first)
break;
if (idx == 0)
if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
- tx_buf->compl_tag = params->compl_tag;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
* simply pass over these holes and finish cleaning the
* rest of the packet.
*/
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
+ tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
- /* record bytecount for BQL */
+ /* record byte count for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
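/* rs_idx replaces the old next_to_watch descriptor pointer: the xmit path
 * records the ring index of the last (RS/EOP) descriptor, and the cleaner
 * reconstructs the pointer as &tx_q->flex_tx[tx_buf->rs_idx] (base_tx[] in
 * singleq mode), keeping the SQE free of queue-type-specific pointers.
 */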
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ txq->tx_buf[i].type = LIBETH_SQE_CTX;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
first->skb = skb;
if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
} else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {