#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
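+/*
+ * A status ring buffer is carved out of a 2 KiB allocation: headroom is
+ * reserved for the driver, slack is left for the 128-byte alignment fixup
+ * done at replenish time, and room is kept for the skb_shared_info tail.
+ * On a typical 64-bit build this leaves roughly 1.4 KiB of usable buffer
+ * per status ring entry.
+ */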
+#define RX_MON_STATUS_BASE_BUF_SIZE 2048
+#define RX_MON_STATUS_BUF_ALIGN 128
+#define RX_MON_STATUS_BUF_RESERVATION 128
+#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
+ (RX_MON_STATUS_BUF_RESERVATION + \
+ RX_MON_STATUS_BUF_ALIGN + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
+ struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
return -ENOMEM;
}
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries)
+{
+ enum hal_rx_buf_return_buf_manager mgr =
+ ab->hw_params->hal_params->rx_buf_rbm;
+ int num_free, num_remain, buf_id;
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ u32 cookie;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
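+	/*
+	 * If the caller did not request a specific count, refill only once
+	 * more than three quarters of the ring entries are free, and then
+	 * top the ring up completely.
+	 */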
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+ if (!skb)
+ break;
+
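+		/*
+		 * Status buffers must start RX_MON_STATUS_BUF_ALIGN (128)
+		 * byte aligned; move skb->data up to the next aligned
+		 * address if the allocation is not already aligned.
+		 */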
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
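+		/*
+		 * Map the whole usable buffer (data plus tailroom) so the
+		 * device can write the complete status TLV stream.
+		 */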
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
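+		/*
+		 * Track the skb in the IDR; the buf_id is encoded into the
+		 * ring cookie and used to look the buffer up on completion.
+		 */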
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
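+		/*
+		 * Claim the next free ring entry; unwind this buffer's
+		 * allocation if the ring is unexpectedly full.
+		 */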
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
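+/*
+ * Error unwind for the most recently allocated buffer: drop the IDR entry,
+ * undo the DMA mapping and free the skb, then publish whatever was already
+ * queued to the ring.
+ */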
+fail_buf_unassign:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
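+	/*
+	 * The per-pdev status refill rings are used only when the target
+	 * has no full monitor ring (rxdma1), so there is nothing further
+	 * to free otherwise.
+	 */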
+ if (ab->hw_params->rxdma1_enable)
+ return 0;
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
+ ath12k_dp_rxdma_mon_buf_ring_free(ab,
+ &dp->rx_mon_status_refill_ring[i]);
+
return 0;
}
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
- ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+
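+	/*
+	 * Status rings carry smaller, 128-byte aligned buffers and are
+	 * therefore replenished through the dedicated status helper.
+	 */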
+ if (ringtype == HAL_RXDMA_MONITOR_STATUS)
+ ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
+ num_entries);
+ else
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- int ret;
+ struct dp_rxdma_mon_ring *mon_ring;
+ int ret, i;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
&dp->rxdma_mon_buf_ring,
HAL_RXDMA_MONITOR_BUF);
	if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ mon_ring = &dp->rx_mon_status_refill_ring[i];
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
return ret;
}
}
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ if (!ab->hw_params->rxdma1_enable) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ath12k_dp_srng_cleanup(ab, srng);
+ }
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
ret);
return ret;
}
+ } else {
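+		/*
+		 * Targets without rxdma1 deliver monitor status through the
+		 * per-pdev status refill rings, which the firmware must be
+		 * told about via HTT.
+		 */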
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id =
+ dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i, ret;
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
+ } else {
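+		/*
+		 * Without rxdma1, monitor status arrives through per-pdev
+		 * status refill rings; set those up instead.
+		 */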
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath12k_dp_srng_setup(ab, srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup mon status ring %d\n",
+ i);
+ return ret;
+ }
+ }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
return ret;
}
}
+ return 0;
+ }
+
+ if (!reset) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon rx buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
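+		/*
+		 * When enabling the monitor (as opposed to resetting it),
+		 * request only the status ring TLVs from the target.
+		 */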
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ i,
+ HAL_RXDMA_MONITOR_STATUS,
+ RX_MON_STATUS_BUF_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon status buf %d\n",
+ ret);
+ return ret;
+ }
}
return 0;