* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
-#include <linux/kfifo.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
ath11k_dp_tx_pending_cleanup, ab);
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
-
- spin_lock_bh(&dp->tx_ring[i].tx_status_lock);
- kfifo_free(&dp->tx_ring[i].tx_status_fifo);
- spin_unlock_bh(&dp->tx_ring[i].tx_status_lock);
+ kfree(dp->tx_ring[i].tx_status);
}
/* Deinit any SOC level resource */
if (ret)
goto fail_link_desc_cleanup;
- size = roundup_pow_of_two(DP_TX_COMP_RING_SIZE);
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
- spin_lock_init(&dp->tx_ring[i].tx_status_lock);
- ret = kfifo_alloc(&dp->tx_ring[i].tx_status_fifo, size,
- GFP_KERNEL);
- if (ret)
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status)
goto fail_cmn_srng_cleanup;
}
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct dp_tx_ring *tx_ring;
- u8 cached_desc[HAL_TCL_DESC_LEN];
void *hal_tcl_desc;
u8 pool_id;
u8 hal_ring_id;
skb_cb->vif = arvif->vif;
skb_cb->ar = ar;
- ath11k_hal_tx_cmd_desc_setup(ab, cached_desc, &ti);
-
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tcl_ring = &ab->hal.srng_list[hal_ring_id];
goto fail_unmap_dma;
}
- ath11k_hal_tx_desc_sync(cached_desc, hal_tcl_desc);
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
ath11k_hal_srng_access_end(ab, tcl_ring);
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
- struct hal_wbm_release_ring tx_status;
struct hal_tx_status ts;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
u8 mac_id;
- spin_lock_bh(&status_ring->lock);
-
ath11k_hal_srng_access_begin(ab, status_ring);
- spin_lock_bh(&tx_ring->tx_status_lock);
- while (!kfifo_is_full(&tx_ring->tx_status_fifo) &&
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
(desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
- ath11k_hal_tx_status_desc_sync((void *)desc,
- (void *)&tx_status);
- kfifo_put(&tx_ring->tx_status_fifo, tx_status);
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
}
if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
- kfifo_is_full(&tx_ring->tx_status_fifo)) {
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
/* TODO: Process pending tx_status messages when the tx_status ring buffer is full */
ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
- spin_unlock_bh(&tx_ring->tx_status_lock);
-
ath11k_hal_srng_access_end(ab, status_ring);
- spin_unlock_bh(&status_ring->lock);
- spin_lock_bh(&tx_ring->tx_status_lock);
- while (kfifo_get(&tx_ring->tx_status_fifo, &tx_status)) {
- memset(&ts, 0, sizeof(ts));
- ath11k_hal_tx_status_parse(ab, &tx_status, &ts);
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_hal_tx_status_parse(ab, tx_status, &ts);
mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, ts.desc_id);
msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ts.desc_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
ath11k_dp_tx_process_htt_tx_complete(ab,
- (void *)&tx_status,
+ (void *)tx_status,
mac_id, msdu_id,
tx_ring);
continue;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
- /* TODO: Locking optimization so that tx_completion for an msdu
- * is not called with tx_status_lock acquired
- */
ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
}
- spin_unlock_bh(&tx_ring->tx_status_lock);
}
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,