#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
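/* How the fields above fit together (sketch; the helper name
 * example_txq_sent_desc_num is hypothetical, not part of the driver):
 * pending descriptors are written to the update register in bits [7:0],
 * which is why MVNETA_TXQ_DEC_SENT_MASK caps a single write at 255; the
 * sent-descriptor decrement occupies bits [23:16] (DEC_SENT_SHIFT); and
 * the number of descriptors the hardware has sent is read back from
 * bits [29:16] of the status register:
 */
static inline int example_txq_sent_desc_num(u32 status)
{
	return (status & MVNETA_TXQ_SENT_DESC_MASK) >>
	       MVNETA_TXQ_SENT_DESC_SHIFT;
}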
/* Number of currently used TX DMA descriptors in the
 * descriptor ring
 */
int count;
+ int pending;
int tx_stop_threshold;
int tx_wake_threshold;
};
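/* Sketch of how these thresholds are typically initialized at txq
 * setup; example_txq_set_thresholds() and MVNETA_MAX_SKB_DESCS (the
 * worst-case descriptor count one skb can consume) are assumptions of
 * this example, not lines from the patch. "pending" counts descriptors
 * queued in software but not yet published to the hardware; see the
 * xmit_more handling further down.
 */
static void example_txq_set_thresholds(struct mvneta_tx_queue *txq)
{
	/* Stop once a worst-case skb might no longer fit... */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	/* ...and wake only with ample headroom, to avoid flapping. */
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
}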
/* Get System Network Statistics */
-static struct rtnl_link_stats64 *
+static void
mvneta_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
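/* With the void ndo_get_stats64(), in-kernel callers obtain statistics
 * through dev_get_stats(), which supplies (and zeroes) the storage
 * itself. Sketch of the calling side; example_dump_tx_drops() is
 * hypothetical:
 */
static void example_dump_tx_drops(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);	/* ends up in mvneta_get_stats64() */
	netdev_info(dev, "tx_dropped: %llu\n", stats.tx_dropped);
}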
/* Rx descriptors helper methods */
/* Only 255 descriptors can be added at once; assume the caller
 * processes TX descriptors in quanta of less than 256
 */
- val = pend_desc;
+ val = pend_desc + txq->pending;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ txq->pending = 0;
}
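/* Flush semantics: descriptors deferred via txq->pending are published
 * to the hardware together with the new ones in a single register
 * write, and the deferred count drops to zero because the hardware now
 * owns them. Callers must keep pend_desc + txq->pending at or below
 * MVNETA_TXQ_DEC_SENT_MASK (255), which the xmit_more path below
 * enforces before deferring.
 */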
/* Get pointer to the next TX descriptor to be processed (sent) by HW */
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq, int num)
+ struct mvneta_tx_queue *txq, int num,
+ struct netdev_queue *nq)
{
+ unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
						 txq->txq_get_index;
struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+ if (skb) {
+ bytes_compl += skb->len;
+ pkts_compl++;
+ }
+
mvneta_txq_inc_get(txq);
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
}
+
+ netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
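/* The netdev_tx_completed_queue() call above is the completion half of
 * Byte Queue Limits: every byte it reports must earlier have been
 * reported on the xmit path, and a torn-down queue must be cleared with
 * netdev_tx_reset_queue(). Minimal pairing sketch; example_xmit() and
 * its single-queue assumption are hypothetical:
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, 0);

	netdev_tx_sent_queue(nq, skb->len);	/* account before kicking HW */
	/* ... map the skb and hand it to the hardware ... */
	return NETDEV_TX_OK;
}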
/* Handle end of transmission */
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
txq->count -= tx_done;
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
+ netdev_tx_sent_queue(nq, len);
+ txq->count += frags;
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
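/* The syncp seqcount lets readers on 32-bit systems take a consistent
 * 64-bit snapshot without locks. Reader-side sketch (the writer is the
 * update_begin/update_end pair above; example_read_tx_stats() is
 * hypothetical):
 */
static void example_read_tx_stats(struct mvneta_pcpu_stats *stats,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->tx_packets;
		*bytes = stats->tx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}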
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
/* reset txq */
txq->count = 0;
rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
- budget -= rx_done;
-
- if (budget > 0) {
+ if (rx_done < budget) {
cause_rx_tx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
if (pp->neta_armada3700) {
unsigned long flags;
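/* napi_complete_done() passes the amount of work actually done to the
 * core, enabling gro_flush_timeout and busy-polling heuristics that the
 * bare napi_complete() cannot drive. Canonical poll shape (sketch only,
 * not the full mvneta handler; example_poll() is hypothetical):
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to budget RX packets, counting them in done ... */
	if (done < budget) {
		napi_complete_done(napi, done);
		/* ... re-enable the device's RX interrupts here ... */
	}
	return done;
}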
static void mvneta_txq_deinit(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
kfree(txq->tx_skb);
	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent, txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
+ netdev_tx_reset_queue(nq);
+
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
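/* Resetting the BQL state in deinit matters: bytes accounted through
 * netdev_tx_sent_queue() but never completed would otherwise survive
 * the teardown and could wedge the queue when it is set up again.
 */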